import sys
import turtle


def get_mid(p1, p2) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])
    if depth == 0:
        return
    # Recurse into the three corner sub-triangles; the midpoints carve out the
    # central triangle, producing the Sierpinski pattern.
    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
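# Usage sketch (illustrative): `python fractals.py 4` draws the triangle at
# recursion depth 4. Every level replaces a triangle with three half-size corner
# copies, so a depth-d run draws 3**d smallest triangles (depth 4 -> 81).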
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_pickle_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "é", ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n",
                SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",",
                SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
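# Side note on the expected tokens above (illustrative): SentencePiece marks
# word-initial pieces with U+2581 ("▁", exported as SPIECE_UNDERLINE), which is
# why "This is a test" becomes ["▁This", "▁is", "▁a", "▁t", "est"]. A quick
# interactive check (assumes network access to the Hub):
#   from transformers import ReformerTokenizer
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   print(tok.tokenize("Hello World!"))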
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirically determined sensitivity constant, usually 0.04-0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
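# For comparison (illustrative, not part of the original script): OpenCV ships a
# built-in Harris detector computing the same response R = det(M) - k * trace(M)**2,
# just over Sobel gradients instead of np.gradient:
#   gray = cv2.imread("path_to_image", 0)
#   response = cv2.cornerHarris(np.float32(gray), blockSize=3, ksize=3, k=0.04)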
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of random numpy images and converts them to PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ["pixel_values", "input_ids", "attention_mask"]
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph with the given edge probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is smaller than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the edge from j to i as well
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete graph with vertices_number vertices."""
    return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
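# Illustrative demo (an addition, not part of the original module): seeding the
# RNG makes the draw reproducible, and an undirected graph must be symmetric.
if __name__ == "__main__":
    random.seed(42)
    demo_graph = random_graph(5, 0.5)
    # every undirected edge must appear in both adjacency lists
    assert all(u in demo_graph[v] for u in demo_graph for v in demo_graph[u])
    print(demo_graph)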
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
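# Example invocation (illustrative; uses the flags registered above with a
# hypothetical input file):
#   transformers-cli run --task text-classification \
#       --model distilbert-base-uncased-finetuned-sst-2-english \
#       --input reviews.csv --column text --format csv \
#       --output predictions.csv --overwrite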
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class HfLoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
            "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
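# Walk-through of the toy BPE setup above (illustrative): "lower" starts as the
# symbols "l o w e r</w>"; the merges then apply in order, l+o -> "lo",
# lo+w -> "low", e+r</w> -> "er</w>", leaving ["low", "er</w>"] -- exactly the
# bpe_tokens asserted in test_full_tokenizer.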
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel


@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False,
        vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
        intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of the given magnitude and direction into x and y components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check whether the net moment about the origin vanishes for the given system."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
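# Extra illustrative check (an addition, not from the original file): a balanced
# see-saw, equal 10 N loads at +/-1 m of the pivot plus the 20 N pivot reaction.
if __name__ == "__main__":
    seesaw_forces = array([[0, -10.0], [0, -10.0], [0, 20.0]])
    seesaw_points = array([[1.0, 0.0], [-1.0, 0.0], [0.0, 0.0]])
    assert in_static_equilibrium(seesaw_forces, seesaw_points)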
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
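# Effect of the _LazyModule indirection above (illustrative): a statement like
#   from transformers.models.xlm import XLMModel
# only triggers the heavy torch-backed import on first attribute access, so a
# bare `import transformers` stays cheap even when many backends are installed.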
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while second != 0:
UpperCamelCase_: Optional[Any] = first & second
first ^= second
UpperCamelCase_: Any = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def A__ ( ) -> List[Any]:
UpperCamelCase_: str = argparse.ArgumentParser()
parser.add_argument("""-f""" )
UpperCamelCase_: List[str] = parser.parse_args()
return args.f
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: List[str] = {}
UpperCamelCase_: List[str] = os.path.join(lowerCamelCase , """all_results.json""" )
if os.path.exists(lowerCamelCase ):
with open(lowerCamelCase , """r""" ) as f:
UpperCamelCase_: int = json.load(lowerCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def A__ ( ) -> Any:
UpperCamelCase_: Tuple = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
lowerCamelCase_ : List[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
UpperCamelCase_: List[str] = get_results(snake_case_ )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: str = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Tuple = f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
UpperCamelCase_: Optional[Any] = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """translation_no_trainer""" ) ) )
@slow
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(snake_case_ )
UpperCamelCase_: List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Optional[Any] = f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
UpperCamelCase_: int = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[int] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: int = f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
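# Only exercise mixed precision when both CUDA and apex are available.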
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
UpperCamelCase_: str = get_results(snake_case_ )
# The base model scores 25% accuracy
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , """image_classification_no_trainer""" ) ) )
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
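# Each feature carries `num_choices` candidate encodings; flatten them into one
# long list so a single tokenizer.pad call pads every candidate together.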
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F''' distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
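# Pair each context (repeated 4 times) with its four candidate endings, flatten
# the pairs for tokenization, then regroup every 4 encodings into one example.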
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
lowerCamelCase_ : str = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : int = """switch_transformers"""
__UpperCamelCase : Dict = ["""past_key_values"""]
__UpperCamelCase : List[Any] = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=3_2128 , snake_case_ : List[str]=768 , snake_case_ : int=64 , snake_case_ : Optional[int]=2048 , snake_case_ : Dict=64 , snake_case_ : Dict=12 , snake_case_ : Optional[int]=3 , snake_case_ : str=12 , snake_case_ : Tuple=3 , snake_case_ : Optional[int]=12 , snake_case_ : Union[str, Any]=8 , snake_case_ : List[Any]=False , snake_case_ : List[str]=0.01 , snake_case_ : Tuple="float32" , snake_case_ : Union[str, Any]=False , snake_case_ : Union[str, Any]=32 , snake_case_ : Union[str, Any]=128 , snake_case_ : Any=0.1 , snake_case_ : int=1e-6 , snake_case_ : List[str]=0.001 , snake_case_ : List[str]=0.001 , snake_case_ : Union[str, Any]=1.0 , snake_case_ : str="relu" , snake_case_ : int=True , snake_case_ : Dict=False , snake_case_ : Optional[Any]=True , snake_case_ : Dict=0 , snake_case_ : Tuple=1 , **snake_case_ : Union[str, Any] , ):
UpperCamelCase_: Tuple = vocab_size
UpperCamelCase_: Any = d_model
UpperCamelCase_: Any = d_kv
UpperCamelCase_: Union[str, Any] = d_ff
UpperCamelCase_: Optional[Any] = num_sparse_encoder_layers
UpperCamelCase_: Tuple = num_layers
UpperCamelCase_: List[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase_: str = num_sparse_decoder_layers
# This tells us after how many encoder layers we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
UpperCamelCase_: Any = self.num_layers // self.num_sparse_encoder_layers
else:
UpperCamelCase_: Dict = self.num_layers # HACK: this will create 0 sparse layers
# This tells us after how many decoder layers we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
UpperCamelCase_: Tuple = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
UpperCamelCase_: Optional[int] = self.num_decoder_layers # HACK: this will create 0 sparse layers
UpperCamelCase_: List[str] = num_heads
UpperCamelCase_: Optional[int] = num_experts
UpperCamelCase_: Union[str, Any] = expert_capacity
UpperCamelCase_: Tuple = router_bias
UpperCamelCase_: str = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
UpperCamelCase_: Tuple = router_dtype
UpperCamelCase_: Optional[int] = router_ignore_padding_tokens
UpperCamelCase_: Optional[Any] = relative_attention_num_buckets
UpperCamelCase_: List[Any] = relative_attention_max_distance
UpperCamelCase_: List[Any] = dropout_rate
UpperCamelCase_: Optional[int] = layer_norm_epsilon
UpperCamelCase_: Any = initializer_factor
UpperCamelCase_: Optional[int] = feed_forward_proj
UpperCamelCase_: Tuple = use_cache
UpperCamelCase_: Tuple = add_router_probs
UpperCamelCase_: Union[str, Any] = router_z_loss_coef
UpperCamelCase_: Tuple = router_aux_loss_coef
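# `feed_forward_proj` is either a plain activation name (e.g. "relu") or
# "gated-<act>"; the optional "gated-" prefix selects the gated feed-forward variant.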
UpperCamelCase_: Tuple = self.feed_forward_proj.split("""-""" )
UpperCamelCase_: str = act_info[-1]
UpperCamelCase_: Dict = act_info[0] == """gated"""
if len(snake_case_ ) > 1 and act_info[0] != "gated" or len(snake_case_ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCamelCase_: Any = """gelu_new"""
super().__init__(
pad_token_id=snake_case_ , eos_token_id=snake_case_ , is_encoder_decoder=snake_case_ , **snake_case_ , )
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
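# Write tiny parallel source/target files (12 train / 2 val / 2 test lines) so
# the finetuning script can run end to end on dummy data.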
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A__ ( lowerCamelCase ) -> tuple:
return (data["data"], data["target"])
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> np.ndarray:
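# Fit a gradient-boosted tree regressor on the training split and return the
# test-set predictions reshaped into a column vector.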
UpperCamelCase_: str = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCamelCase , lowerCamelCase )
# Predict target for test data
UpperCamelCase_: List[str] = xgb.predict(lowerCamelCase )
UpperCamelCase_: Optional[Any] = predictions.reshape(len(lowerCamelCase ) , 1 )
return predictions
def A__ ( ) -> None:
UpperCamelCase_: Union[str, Any] = fetch_california_housing()
UpperCamelCase_, UpperCamelCase_: Optional[Any] = data_handling(lowerCamelCase )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = train_test_split(
lowerCamelCase , lowerCamelCase , test_size=0.25 , random_state=1 )
UpperCamelCase_: str = xgboost(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Error printing
print(F'''Mean Absolute Error : {mean_absolute_error(lowerCamelCase , lowerCamelCase )}''' )
print(F'''Mean Square Error : {mean_squared_error(lowerCamelCase , lowerCamelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
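# Splice `node_to_insert` between `node` and its previous neighbour,
# promoting it to the new head when `node` was the first element.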
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
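# Splice `node_to_insert` between `node` and its next neighbour,
# making it the new tail when `node` was the last element.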
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
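# Walk the list with a 1-indexed cursor and insert the new node just before
# the node at `position`; if the position is past the end, append at the tail.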
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowerCamelCase_ : Dict = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowerCamelCase_ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
def A__ ( lowerCamelCase ) -> list:
def merge(lowerCamelCase , lowerCamelCase ) -> list:
def _merge():
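# Repeatedly yield the smaller front element of the two sorted runs, then
# drain whichever run still has items left.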
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(lowerCamelCase ) <= 1:
return collection
UpperCamelCase_: List[str] = len(lowerCamelCase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : Optional[int] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase_ : Any = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
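# Assemble a tiny unconditional latent-diffusion pipeline from the dummy UNet
# and VQ model, then check that dict and tuple outputs match a reference slice
# for a fixed seed.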
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
# Load checkpoint
UpperCamelCase_: List[Any] = torch.load(lowerCamelCase , map_location="""cpu""" )
UpperCamelCase_: Optional[int] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
UpperCamelCase_: List[str] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase_: str = v
else:
UpperCamelCase_: Tuple = v
UpperCamelCase_: Tuple = chkpt["""params"""]
UpperCamelCase_: Any = {n: v for n, v in config.items() if not isinstance(lowerCamelCase , (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase_: str = chkpt["""dico_word2id"""]
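# Rewrite the BPE vocab for the HF tokenizer: word-final pieces (no "@@") get a
# "</w>" suffix, "@@" continuation markers are stripped, and the first 14 ids
# (special tokens) are left untouched.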
UpperCamelCase_: str = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCamelCase_: Dict = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
UpperCamelCase_: Optional[int] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
UpperCamelCase_: Any = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(lowerCamelCase , lowerCamelCase )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , indent=2 ) + """\n""" )
print(F'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowerCamelCase , indent=2 ) + """\n""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Tuple = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
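# Dynamic programming over row lengths: for each coloured tile size (2, 3 or 4)
# count the rows containing at least one tile of that size by trying every
# start offset, then sum the three independent colour counts (colours are
# never mixed), as in the classic red/green/blue tiles problem.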
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Dict = """linear"""
__UpperCamelCase : List[Any] = """cosine"""
__UpperCamelCase : int = """cosine_with_restarts"""
__UpperCamelCase : List[str] = """polynomial"""
__UpperCamelCase : Any = """constant"""
__UpperCamelCase : List[str] = """constant_with_warmup"""
__UpperCamelCase : str = """piecewise_constant"""
def A__ ( lowerCamelCase , lowerCamelCase = -1 ) -> Any:
return LambdaLR(lowerCamelCase , lambda lowerCamelCase : 1 , last_epoch=lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = -1 ) -> Dict:
def lr_lambda(lowerCamelCase ):
if current_step < num_warmup_steps:
return float(lowerCamelCase ) / float(max(1.0 , lowerCamelCase ) )
return 1.0
return LambdaLR(lowerCamelCase , lowerCamelCase , last_epoch=lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = -1 ) -> Optional[int]:
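# `step_rules` encodes a piecewise-constant multiplier schedule as
# comma-separated `value:step` pairs plus a final bare multiplier, e.g. a
# hypothetical "1:10,0.1:20,0.01": each value applies while the current step
# is below its boundary, and the trailing value applies afterwards.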
UpperCamelCase_: Dict = {}
UpperCamelCase_: List[str] = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
UpperCamelCase_, UpperCamelCase_: Tuple = rule_str.split(""":""" )
UpperCamelCase_: Dict = int(lowerCamelCase )
UpperCamelCase_: Dict = float(lowerCamelCase )
UpperCamelCase_: Any = value
UpperCamelCase_: Optional[int] = float(rule_list[-1] )
def create_rules_function(lowerCamelCase , lowerCamelCase ):
def rule_func(lowerCamelCase ) -> float:
UpperCamelCase_: Dict = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
UpperCamelCase_: Optional[Any] = create_rules_function(lowerCamelCase , lowerCamelCase )
return LambdaLR(lowerCamelCase , lowerCamelCase , last_epoch=lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=-1 ) -> Optional[Any]:
def lr_lambda(lowerCamelCase ):
if current_step < num_warmup_steps:
return float(lowerCamelCase ) / float(max(1 , lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0.5 , lowerCamelCase = -1 ) -> Dict:
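# After linear warmup the multiplier follows a cosine from 1 down to 0; the
# default num_cycles=0.5 traces exactly half a cosine period over the
# remaining training steps.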
def lr_lambda(lowerCamelCase ):
if current_step < num_warmup_steps:
return float(lowerCamelCase ) / float(max(1 , lowerCamelCase ) )
UpperCamelCase_: Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = -1 ) -> Optional[Any]:
def lr_lambda(lowerCamelCase ):
if current_step < num_warmup_steps:
return float(lowerCamelCase ) / float(max(1 , lowerCamelCase ) )
UpperCamelCase_: Optional[int] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1E-7 , lowerCamelCase=1.0 , lowerCamelCase=-1 ) -> List[str]:
UpperCamelCase_: List[Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
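# Linear warmup, then a polynomial decay of the multiplier so the effective
# rate falls from lr_init to lr_end; past num_training_steps the schedule
# simply holds lr_end.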
def lr_lambda(lowerCamelCase ):
if current_step < num_warmup_steps:
return float(lowerCamelCase ) / float(max(1 , lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
UpperCamelCase_: Optional[int] = lr_init - lr_end
UpperCamelCase_: Optional[Any] = num_training_steps - num_warmup_steps
UpperCamelCase_: Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
UpperCamelCase_: Optional[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Optional[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 1.0 , lowerCamelCase = -1 , ) -> Optional[int]:
UpperCamelCase_: List[Any] = SchedulerType(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(lowerCamelCase , last_epoch=lowerCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(lowerCamelCase , step_rules=lowerCamelCase , last_epoch=lowerCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(lowerCamelCase , num_warmup_steps=lowerCamelCase , last_epoch=lowerCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
lowerCamelCase , num_warmup_steps=lowerCamelCase , num_training_steps=lowerCamelCase , num_cycles=lowerCamelCase , last_epoch=lowerCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
lowerCamelCase , num_warmup_steps=lowerCamelCase , num_training_steps=lowerCamelCase , power=lowerCamelCase , last_epoch=lowerCamelCase , )
return schedule_func(
lowerCamelCase , num_warmup_steps=lowerCamelCase , num_training_steps=lowerCamelCase , last_epoch=lowerCamelCase )
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 1 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def A__ ( ) -> Union[str, Any]:
UpperCamelCase_: List[str] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
UpperCamelCase_: Any = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw ).convert("""RGB""" )
return image
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: Optional[Any] = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
UpperCamelCase_: Any = dct.pop(lowerCamelCase )
UpperCamelCase_: Any = val
def A__ ( lowerCamelCase , lowerCamelCase ) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCamelCase_: List[str] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCamelCase_: Optional[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
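# The original attention stores separate q and v biases and no k bias, so
# zeros are packed in the middle to form the fused qkv bias.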
UpperCamelCase_: List[Any] = torch.cat((q_bias, torch.zeros_like(lowerCamelCase , requires_grad=lowerCamelCase ), v_bias) )
UpperCamelCase_: int = qkv_bias
def A__ ( lowerCamelCase , lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: Optional[int] = 3_64 if """coco""" in model_name else 2_24
UpperCamelCase_: Optional[int] = BlipaVisionConfig(image_size=lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCamelCase_: Optional[Any] = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCamelCase_: int = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
UpperCamelCase_: Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCamelCase_: str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
UpperCamelCase_: Optional[Any] = BlipaConfig(vision_config=lowerCamelCase , text_config=lowerCamelCase )
return config, image_size
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ) -> str:
UpperCamelCase_: List[str] = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
UpperCamelCase_: List[Any] = tokenizer("""\n""" , add_special_tokens=lowerCamelCase ).input_ids[0]
UpperCamelCase_, UpperCamelCase_: List[str] = get_blipa_config(lowerCamelCase , eos_token_id=lowerCamelCase )
UpperCamelCase_: Any = BlipaForConditionalGeneration(lowerCamelCase ).eval()
UpperCamelCase_: List[Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
UpperCamelCase_, UpperCamelCase_: List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
UpperCamelCase_: str = """cuda""" if torch.cuda.is_available() else """cpu"""
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Any = load_model_and_preprocess(
name=lowerCamelCase , model_type=lowerCamelCase , is_eval=lowerCamelCase , device=lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
UpperCamelCase_: List[Any] = original_model.state_dict()
UpperCamelCase_: Union[str, Any] = create_rename_keys(lowerCamelCase )
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase_: List[Any] = state_dict.pop(lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
UpperCamelCase_: Dict = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
UpperCamelCase_: int = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
UpperCamelCase_: int = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
UpperCamelCase_: Optional[int] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
UpperCamelCase_: Any = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
UpperCamelCase_: Dict = key.replace("""t5""" , """language""" )
UpperCamelCase_: Optional[int] = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase , lowerCamelCase )
UpperCamelCase_, UpperCamelCase_: Dict = hf_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert len(lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase_: List[Any] = load_demo_image()
UpperCamelCase_: Optional[int] = vis_processors["""eval"""](lowerCamelCase ).unsqueeze(0 ).to(lowerCamelCase )
UpperCamelCase_: Tuple = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
# create processor
UpperCamelCase_: str = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=lowerCamelCase , image_std=lowerCamelCase )
UpperCamelCase_: Dict = BlipaProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
UpperCamelCase_: Optional[Any] = processor(images=lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
original_model.to(lowerCamelCase )
hf_model.to(lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase_: Any = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
UpperCamelCase_: Dict = hf_model(lowerCamelCase , lowerCamelCase ).logits
else:
UpperCamelCase_: List[str] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
UpperCamelCase_: Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
UpperCamelCase_: Union[str, Any] = hf_model(lowerCamelCase , lowerCamelCase , labels=lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase_: int = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
        UpperCamelCase_: Dict = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowerCamelCase )
        assert torch.allclose(logits[0, :3, :3] , lowerCamelCase , atol=1E-4 )
else:
# cast to same type
UpperCamelCase_: str = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase ) , lowerCamelCase , atol=1E-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
UpperCamelCase_: Optional[Any] = """"""
UpperCamelCase_: Union[str, Any] = tokenizer(lowerCamelCase , return_tensors="""pt""" ).input_ids.to(lowerCamelCase )
UpperCamelCase_: List[str] = original_model.generate({"""image""": original_pixel_values} )
UpperCamelCase_: Any = hf_model.generate(
lowerCamelCase , lowerCamelCase , do_sample=lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , lowerCamelCase )
UpperCamelCase_: Any = input_ids.shape[1]
UpperCamelCase_: Dict = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase )
UpperCamelCase_: Any = [text.strip() for text in output_text]
print("""HF generation:""" , lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
lowerCamelCase_ : Dict = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
lowerCamelCase_ : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
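    # Illustrative invocation (the script name and paths below are assumptions,
    # not from the original source):
    #   python convert_blip_2.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub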
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
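# Lazy import structure: heavy, framework-specific submodules are only imported
# on first attribute access, and each optional backend (tokenizers / torch /
# tf / flax) extends the structure below when its dependencies are available.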
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # when the randomly generated number is below `probability`
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
            # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
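# Multiplicity-aware Newton-Raphson on a sympy-parsable expression: iterate
# x_{n+1} = x_n - m * f(x_n) / f'(x_n) until two consecutive guesses differ by
# less than `precision`.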
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Dict = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: int = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
UpperCamelCase_: List[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def lowerCAmelCase__ ( self : Any ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase_: Optional[Any] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self : Dict ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCamelCase_: Tuple = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Tuple = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
@require_multi_gpu
def lowerCAmelCase__ ( self : Tuple ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCamelCase_: Optional[int] = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
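# When launched directly (e.g. via torchrun), each process below builds a
# tensor whose first dimension depends on its rank, then verifies that
# `pad_across_processes` pads at the end (default) and at the front
# (`pad_first=True`) with zeros, collecting all failures before raising.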
if __name__ == "__main__":
lowerCamelCase_ : int = Accelerator()
lowerCamelCase_ : Any = (accelerator.state.process_index + 2, 10)
lowerCamelCase_ : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
lowerCamelCase_ : Optional[int] = """"""
lowerCamelCase_ : int = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
lowerCamelCase_ : str = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
lowerCamelCase_ : List[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 670 |
from manim import *
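# Manim scene animating big-model loading: CPU, GPU and model memory blocks are
# drawn, then empty parameter slots stream from the model into CPU memory to
# show that the skeleton is materialised without using much RAM.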
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
lowerCamelCase_ : Tuple = logging.getLogger(__name__)
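# PyTorch Lightning module for token classification (NER, POS, ...): wraps a
# Transformers encoder, caches tokenized features per split, and reports
# seqeval accuracy / precision / recall / F1 on validation and test.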
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Any = """token-classification"""
def __init__( self : List[Any] , snake_case_ : int ):
if type(snake_case_ ) == dict:
UpperCamelCase_: List[str] = Namespace(**snake_case_ )
UpperCamelCase_: Any = import_module("""tasks""" )
try:
UpperCamelCase_: List[Any] = getattr(snake_case_ , hparams.task_type )
UpperCamelCase_: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
UpperCamelCase_: Tuple = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase_: Tuple = CrossEntropyLoss().ignore_index
super().__init__(snake_case_ , len(self.labels ) , self.mode )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : str ):
return self.model(**snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : Dict , snake_case_ : int ):
UpperCamelCase_: List[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase_: Tuple = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase_: Union[str, Any] = self(**snake_case_ )
UpperCamelCase_: List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase_: str = self._feature_file(snake_case_ )
if os.path.exists(snake_case_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , snake_case_ )
UpperCamelCase_: Optional[int] = torch.load(snake_case_ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
UpperCamelCase_: Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , snake_case_ )
UpperCamelCase_: Tuple = self.token_classification_task.convert_examples_to_features(
snake_case_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=snake_case_ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , snake_case_ )
torch.save(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : str , snake_case_ : int , snake_case_ : int , snake_case_ : bool = False ):
UpperCamelCase_: int = self._feature_file(snake_case_ )
logger.info("""Loading features from cached file %s""" , snake_case_ )
UpperCamelCase_: Optional[int] = torch.load(snake_case_ )
UpperCamelCase_: Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase_: Tuple = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase_: str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase_: Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCamelCase_: List[Any] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , batch_size=snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : str , snake_case_ : List[Any] ):
"""Compute validation""" ""
UpperCamelCase_: str = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase_: Optional[Any] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase_: int = self(**snake_case_ )
UpperCamelCase_, UpperCamelCase_: Optional[int] = outputs[:2]
UpperCamelCase_: Tuple = logits.detach().cpu().numpy()
UpperCamelCase_: List[Any] = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase__ ( self : Dict , snake_case_ : str ):
UpperCamelCase_: int = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
UpperCamelCase_: List[Any] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
UpperCamelCase_: List[Any] = np.argmax(snake_case_ , axis=2 )
UpperCamelCase_: List[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
UpperCamelCase_: List[Any] = dict(enumerate(self.labels ) )
UpperCamelCase_: Union[str, Any] = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase_: int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase_: Dict = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(snake_case_ , snake_case_ ),
"""precision""": precision_score(snake_case_ , snake_case_ ),
"""recall""": recall_score(snake_case_ , snake_case_ ),
"""f1""": fa_score(snake_case_ , snake_case_ ),
}
UpperCamelCase_: List[str] = dict(results.items() )
UpperCamelCase_: List[str] = results
return ret, preds_list, out_label_list
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[Any] ):
# when stable
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Dict = self._eval_end(snake_case_ )
UpperCamelCase_: Tuple = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase__ ( self : Dict , snake_case_ : Union[str, Any] ):
# updating to test_epoch_end instead of deprecated test_end
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = self._eval_end(snake_case_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase_: Dict = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase__ ( snake_case_ : List[Any] , snake_case_ : Optional[int] ):
# Add NER specific options
BaseTransformer.add_model_specific_args(snake_case_ , snake_case_ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=snake_case_ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=snake_case_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=snake_case_ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=snake_case_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
lowerCamelCase_ : str = NERTransformer.add_model_specific_args(parser, os.getcwd())
lowerCamelCase_ : Optional[int] = parser.parse_args()
lowerCamelCase_ : List[Any] = NERTransformer(args)
lowerCamelCase_ : Dict = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
lowerCamelCase_ : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
lowerCamelCase_ : List[Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
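# Round-trip tests for ClapProcessor: save/load of the tokenizer and feature
# extractor, delegation of audio and text inputs, and batch decoding.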
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
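# Truth table covered by the test below: (0,0)->0, (0,1)->0, (1,0)->0, (1,1)->1.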
def A__ ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 1 |
import re
def A__ ( lowerCamelCase ) -> list:
return [char.split() for char in re.split(r"""[^ a-z A-Z 0-9 \s]""" , str_ )]
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: Any = split_input(str_ )
return "".join(
["""""".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
try:
UpperCamelCase_: int = split_input(lowerCamelCase )
if upper:
UpperCamelCase_: Optional[int] = """""".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCamelCase_: List[Any] = """""".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def A__ ( lowerCamelCase ) -> str:
return to_simple_case(lowerCamelCase )
def A__ ( lowerCamelCase ) -> str:
try:
UpperCamelCase_: Optional[int] = to_simple_case(lowerCamelCase )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
return to_complex_case(lowerCamelCase , lowerCamelCase , """_""" )
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
return to_complex_case(lowerCamelCase , lowerCamelCase , """-""" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
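# Copies the weight-norm parameters (weight_g / weight_v) and biases of the
# original HiFi-GAN generator into the Hugging Face SpeechTaHifiGan layers.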
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
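# Full conversion: build (or load) the config, transfer the generator weights,
# attach the spectrogram normalisation statistics (mean / scale) from the
# stats .npy file, then save and optionally push the model to the Hub.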
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 670 | 1 |
from decimal import Decimal, getcontext
from math import ceil, factorial
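# Chudnovsky series: pi = 426880 * sqrt(10005) / sum_{k>=0} [ (6k)! *
# (13591409 + 545140134*k) ] / [ (3k)! * (k!)^3 * (-262537412640768000)^k ].
# Each term contributes roughly 14 digits, hence ceil(precision / 14) rounds.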
def A__ ( lowerCamelCase ) -> str:
if not isinstance(lowerCamelCase , lowerCamelCase ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
UpperCamelCase_: Dict = precision
UpperCamelCase_: List[Any] = ceil(precision / 14 )
UpperCamelCase_: List[Any] = 42_68_80 * Decimal(1_00_05 ).sqrt()
UpperCamelCase_: List[Any] = 1
UpperCamelCase_: Optional[Any] = 13_59_14_09
UpperCamelCase_: List[str] = Decimal(lowerCamelCase )
for k in range(1 , lowerCamelCase ):
UpperCamelCase_: List[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowerCamelCase ) ** 3)
linear_term += 5_45_14_01_34
exponential_term *= -26_25_37_41_26_40_76_80_00
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
lowerCamelCase_ : Dict = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 670 |
import cva
import numpy as np
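# Harris corner detector: for each pixel, sum the squared image gradients over
# a window to form the structure tensor (Wxx, Wxy, Wyy), score it with
# R = det - k * trace^2, and mark high-response pixels in red on the output.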
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
        UpperCamelCase_: str = 0.04  # note: this local value shadows self.k set in __init__
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
                # response threshold; can be tuned to mark more or fewer corners
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 670 | 1 |
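# Frozensets of the call arguments accepted by each diffusers pipeline family
# (text-to-image, image variation, inpainting, audio, token-based, ...); the
# params / batch-params pairs are consumed by shared pipeline tests to validate
# signatures and batching behaviour.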
lowerCamelCase_ : str = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase_ : int = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase_ : Optional[int] = frozenset([])
lowerCamelCase_ : Dict = frozenset(["""image"""])
lowerCamelCase_ : Union[str, Any] = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase_ : Tuple = frozenset(["""image"""])
lowerCamelCase_ : str = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase_ : str = frozenset(["""prompt""", """image""", """negative_prompt"""])
lowerCamelCase_ : int = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
lowerCamelCase_ : Any = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
lowerCamelCase_ : Any = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase_ : Optional[Any] = frozenset(["""image""", """mask_image"""])
lowerCamelCase_ : Dict = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
lowerCamelCase_ : Dict = frozenset(["""example_image""", """image""", """mask_image"""])
lowerCamelCase_ : Any = frozenset(["""class_labels"""])
lowerCamelCase_ : Tuple = frozenset(["""class_labels"""])
lowerCamelCase_ : List[Any] = frozenset(["""batch_size"""])
lowerCamelCase_ : List[Any] = frozenset([])
lowerCamelCase_ : Dict = frozenset(["""batch_size"""])
lowerCamelCase_ : Any = frozenset([])
lowerCamelCase_ : int = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
lowerCamelCase_ : Optional[Any] = frozenset(["""prompt""", """negative_prompt"""])
lowerCamelCase_ : Tuple = frozenset(["""input_tokens"""])
lowerCamelCase_ : Tuple = frozenset(["""input_tokens"""])
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
    # if probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
    # if probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes (i, j), add an edge from i to j
    # when the randomly generated number is below `probability`
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
            # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase_ : Tuple = random.Random()
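# Generates a nested list of random floats with the given 2-D shape, optionally
# scaled; used as synthetic raw audio throughout the tests below.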
def A__ ( lowerCamelCase , lowerCamelCase=1.0 , lowerCamelCase=None , lowerCamelCase=None ) -> Any:
if rng is None:
UpperCamelCase_: List[str] = global_rng
UpperCamelCase_: str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Any=7 , snake_case_ : Optional[int]=400 , snake_case_ : Any=2000 , snake_case_ : Any=2048 , snake_case_ : List[Any]=128 , snake_case_ : Dict=1 , snake_case_ : str=512 , snake_case_ : Dict=30 , snake_case_ : Optional[int]=4_4100 , ):
UpperCamelCase_: str = parent
UpperCamelCase_: Tuple = batch_size
UpperCamelCase_: Dict = min_seq_length
UpperCamelCase_: Optional[int] = max_seq_length
UpperCamelCase_: Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase_: Union[str, Any] = spectrogram_length
UpperCamelCase_: Dict = feature_size
UpperCamelCase_: int = num_audio_channels
UpperCamelCase_: int = hop_length
UpperCamelCase_: Any = chunk_length
UpperCamelCase_: Optional[Any] = sampling_rate
def lowerCAmelCase__ ( self : int ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[Any]=False , snake_case_ : int=False ):
def _flatten(snake_case_ : Union[str, Any] ):
return list(itertools.chain(*snake_case_ ) )
if equal_length:
UpperCamelCase_: Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase_: Optional[int] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
UpperCamelCase_: Union[str, Any] = [np.asarray(snake_case_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = TvltFeatureExtractor
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = TvltFeatureExtractionTester(self )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case_ , """spectrogram_length""" ) )
self.assertTrue(hasattr(snake_case_ , """feature_size""" ) )
self.assertTrue(hasattr(snake_case_ , """num_audio_channels""" ) )
self.assertTrue(hasattr(snake_case_ , """hop_length""" ) )
self.assertTrue(hasattr(snake_case_ , """chunk_length""" ) )
self.assertTrue(hasattr(snake_case_ , """sampling_rate""" ) )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: Optional[int] = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
UpperCamelCase_: Union[str, Any] = self.feature_extraction_class.from_pretrained(snake_case_ )
UpperCamelCase_: Any = feat_extract_first.to_dict()
UpperCamelCase_: List[str] = feat_extract_second.to_dict()
UpperCamelCase_: Any = dict_first.pop("""mel_filters""" )
UpperCamelCase_: Optional[Any] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_: int = os.path.join(snake_case_ , """feat_extract.json""" )
feat_extract_first.to_json_file(snake_case_ )
UpperCamelCase_: Optional[Any] = self.feature_extraction_class.from_json_file(snake_case_ )
UpperCamelCase_: Any = feat_extract_first.to_dict()
UpperCamelCase_: str = feat_extract_second.to_dict()
UpperCamelCase_: Dict = dict_first.pop("""mel_filters""" )
UpperCamelCase_: Any = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(snake_case_ , snake_case_ ) )
self.assertEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : int ):
# Initialize feature_extractor
UpperCamelCase_: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_: Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_: List[str] = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
# Test not batched input
UpperCamelCase_: Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
UpperCamelCase_: Any = feature_extractor(snake_case_ , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
UpperCamelCase_: str = feature_extractor(
snake_case_ , return_tensors="""np""" , sampling_rate=4_4100 , mask_audio=snake_case_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
UpperCamelCase_: Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_: int = np.asarray(snake_case_ )
UpperCamelCase_: Optional[int] = feature_extractor(snake_case_ , return_tensors="""np""" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[Any] ):
UpperCamelCase_: Optional[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
UpperCamelCase_: Any = ds.sort("""id""" ).select(range(snake_case_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[str] = self._load_datasamples(1 )
UpperCamelCase_: str = TvltFeatureExtractor()
UpperCamelCase_: str = feature_extractor(snake_case_ , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
UpperCamelCase_: Dict = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case_ , atol=1e-4 ) )
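A minimal usage sketch of the extraction path these tests exercise, assuming a default-configured extractor; the one-second 440 Hz sine wave is an arbitrary dummy input.
import numpy as np
from transformers import TvltFeatureExtractor

extractor = TvltFeatureExtractor()
# Dummy input: one second of a 440 Hz sine wave at 44.1 kHz.
waveform = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 44100)).astype(np.float32)
features = extractor(waveform, return_tensors="np", sampling_rate=44100)
print(features.audio_values.shape)  # 4-D: (batch, channels, time, feature_size)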
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
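A minimal sketch of the verbosity round-trip these tests rely on: remember the current level, silence the library, then restore it.
from transformers.utils import logging

previous_level = logging.get_verbosity()  # remember the current level
logging.set_verbosity_error()             # silence warnings from all transformers.* loggers
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
logger.warning("dropped: below the error threshold")  # not emitted
logging.set_verbosity(previous_level)     # restore the original level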
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : int = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : str = """trocr"""
__UpperCamelCase : List[Any] = ["""past_key_values"""]
__UpperCamelCase : int = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Union[str, Any] , snake_case_ : Tuple=5_0265 , snake_case_ : Dict=1024 , snake_case_ : str=12 , snake_case_ : int=16 , snake_case_ : Optional[Any]=4096 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[Any]=512 , snake_case_ : List[Any]=0.1 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : str=0.0 , snake_case_ : List[Any]=2 , snake_case_ : Optional[int]=0.02 , snake_case_ : Dict=0.0 , snake_case_ : List[str]=True , snake_case_ : Optional[int]=False , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[Any]=True , snake_case_ : str=1 , snake_case_ : Tuple=0 , snake_case_ : Optional[Any]=2 , **snake_case_ : str , ):
UpperCamelCase_: Dict = vocab_size
UpperCamelCase_: Any = d_model
UpperCamelCase_: Union[str, Any] = decoder_layers
UpperCamelCase_: str = decoder_attention_heads
UpperCamelCase_: int = decoder_ffn_dim
UpperCamelCase_: Union[str, Any] = activation_function
UpperCamelCase_: Any = max_position_embeddings
UpperCamelCase_: Tuple = dropout
UpperCamelCase_: Union[str, Any] = attention_dropout
UpperCamelCase_: Union[str, Any] = activation_dropout
UpperCamelCase_: int = init_std
UpperCamelCase_: List[str] = decoder_layerdrop
UpperCamelCase_: int = use_cache
UpperCamelCase_: Tuple = scale_embedding
UpperCamelCase_: List[str] = use_learned_position_embeddings
UpperCamelCase_: Dict = layernorm_embedding
super().__init__(
pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , decoder_start_token_id=snake_case_ , **snake_case_ , )
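A minimal sketch instantiating the decoder config defined above; the overridden sizes are arbitrary assumptions, and `hidden_size` resolves through the attribute map to `d_model`.
from transformers import TrOCRConfig

config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
print(config.model_type)   # "trocr"
print(config.hidden_size)  # 256 -- the attribute map routes hidden_size to d_model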
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
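A minimal sketch of the env-override pattern used for `HF_MODULES_CACHE` above; the `~/.cache/huggingface` fallback stands in for `hf_cache_home` and mirrors its documented default.
import os

# Fall back to a path under the cache home when the variable is unset.
hf_cache_home = os.path.expanduser(os.getenv("HF_HOME", "~/.cache/huggingface"))
modules_cache = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
print(modules_cache)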
| 670 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = MODEL_FOR_MASKED_LM_MAPPING
__UpperCamelCase : List[Any] = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowerCAmelCase__ ( self : List[Any] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
UpperCamelCase_: Optional[int] = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1e-05, """token""": 3_8015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1e-05, """token""": 2_5506, """token_str""": """ accuser"""},
] , )
UpperCamelCase_: List[Any] = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1e-05,
"""token""": 3_8015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1e-05,
"""token""": 2_5506,
"""token_str""": """ accuser""",
},
] , )
UpperCamelCase_: Dict = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9e-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Tuple = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
UpperCamelCase_: Tuple = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2e-05, """token""": 3_5676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
UpperCamelCase_: Dict = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS"""},
] , )
UpperCamelCase_: int = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1e-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2e-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2e-05, """token""": 1_3606, """token_str""": """ Clara"""},
] , )
UpperCamelCase_: Tuple = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(snake_case_ , decimals=6 ) , [
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2e-05,
"""token""": 3_5676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2e-05, """token""": 1_6416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: str = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
UpperCamelCase_: List[Any] = pipe("""Paris is the [MASK] of France.""" )
        # We don't actually care about the result; we just want to make sure
        # the call works, i.e. that the float16 tensor was cast back to float32
        # for postprocessing.
self.assertIsInstance(snake_case_ , snake_case_ )
@slow
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(snake_case_ )
@slow
@require_tf
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: List[Any] = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Any = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
UpperCamelCase_: int = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 1_2790,
"""token_str""": """ Lyon""",
},
] , )
UpperCamelCase_: Optional[Any] = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(snake_case_ ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 1_3606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[int] = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
UpperCamelCase_: Any = None
UpperCamelCase_: List[str] = None
self.run_pipeline_test(snake_case_ , [] )
@require_tf
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
UpperCamelCase_: Dict = None
UpperCamelCase_: Optional[int] = None
self.run_pipeline_test(snake_case_ , [] )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str , snake_case_ : Tuple ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
UpperCamelCase_: Optional[Any] = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
UpperCamelCase_: List[Any] = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Any , snake_case_ : List[str] ):
UpperCamelCase_: str = fill_masker.tokenizer
UpperCamelCase_: str = fill_masker.model
UpperCamelCase_: Optional[int] = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
UpperCamelCase_: int = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
UpperCamelCase_: List[Any] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
snake_case_ , [
[
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
],
[
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
],
] , )
with self.assertRaises(snake_case_ ):
fill_masker([None] )
        # Input without a mask_token is not supported
with self.assertRaises(snake_case_ ):
fill_masker("""This is""" )
self.run_test_top_k(snake_case_ , snake_case_ )
self.run_test_targets(snake_case_ , snake_case_ )
self.run_test_top_k_targets(snake_case_ , snake_case_ )
self.fill_mask_with_duplicate_targets_and_top_k(snake_case_ , snake_case_ )
self.fill_mask_with_multiple_masks(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Optional[int] ):
UpperCamelCase_: Optional[int] = tokenizer.get_vocab()
UpperCamelCase_: Optional[Any] = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase_: Dict = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ , targets=snake_case_ )
UpperCamelCase_: str = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
UpperCamelCase_: Union[str, Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case_ )
UpperCamelCase_: List[str] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case_ ) )
# Call argument
UpperCamelCase_: Union[str, Any] = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
UpperCamelCase_: Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case_ )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
UpperCamelCase_: List[Any] = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , snake_case_ )
UpperCamelCase_: Optional[Any] = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(snake_case_ ) )
# Score equivalence
UpperCamelCase_: Optional[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case_ )
UpperCamelCase_: str = [top_mask["""token_str"""] for top_mask in outputs]
UpperCamelCase_: Tuple = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case_ ) == set(snake_case_ ):
UpperCamelCase_: Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=snake_case_ )
UpperCamelCase_: Optional[int] = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(snake_case_ ) , nested_simplify(snake_case_ ) )
# Raises with invalid
with self.assertRaises(snake_case_ ):
UpperCamelCase_: List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""""""] )
with self.assertRaises(snake_case_ ):
UpperCamelCase_: List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="""""" )
def lowerCAmelCase__ ( self : Dict , snake_case_ : Tuple , snake_case_ : List[str] ):
UpperCamelCase_: str = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ , top_k=2 )
UpperCamelCase_: List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
UpperCamelCase_: int = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
UpperCamelCase_: Dict = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
snake_case_ , [
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
] , )
self.assertEqual(nested_simplify(snake_case_ ) , nested_simplify(snake_case_ ) )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : int , snake_case_ : Union[str, Any] ):
UpperCamelCase_: Optional[Any] = tokenizer.get_vocab()
UpperCamelCase_: Optional[int] = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
# top_k=2, ntargets=3
UpperCamelCase_: Tuple = sorted(vocab.keys() )[:3]
UpperCamelCase_: List[str] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=snake_case_ )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
UpperCamelCase_: int = [el["""token_str"""] for el in sorted(snake_case_ , key=lambda snake_case_ : x["score"] , reverse=snake_case_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(snake_case_ ).issubset(snake_case_ ):
UpperCamelCase_: Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=snake_case_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(snake_case_ ) , nested_simplify(snake_case_ ) )
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : Tuple , snake_case_ : int ):
UpperCamelCase_: Any = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
UpperCamelCase_: Dict = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase_: Optional[Any] = sorted(vocab.keys() )[:3]
UpperCamelCase_: Tuple = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase_: Optional[int] = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=snake_case_ , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # predictions than there are unique targets
self.assertEqual(len(snake_case_ ) , 3 )
def lowerCAmelCase__ ( self : Any , snake_case_ : List[Any] , snake_case_ : Dict ):
UpperCamelCase_: List[str] = FillMaskPipeline(model=snake_case_ , tokenizer=snake_case_ )
UpperCamelCase_: Union[str, Any] = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
snake_case_ , [
[
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
],
[
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
],
[
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
{"""sequence""": ANY(snake_case_ ), """score""": ANY(snake_case_ ), """token""": ANY(snake_case_ ), """token_str""": ANY(snake_case_ )},
],
] , )
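A minimal usage sketch of the behavior these tests cover, using the same tiny checkpoint; the scores produced by the random tiny model are not meaningful.
from transformers import pipeline

unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for prediction in unmasker("My name is <mask>"):
    # Each prediction is a dict with sequence, score, token and token_str.
    print(prediction["token_str"], round(prediction["score"], 6))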
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
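A minimal sketch of what this test automates, using only the standard library; both script paths are assumed placeholders.
import subprocess
import sys

# Spawn a script on 8 TPU cores through xla_spawn.py, mirroring the
# distributed_args assembled in the test above.
cmd = [sys.executable, "tests/xla_spawn.py", "--num_cores", "8", "tests/test_script.py"]
subprocess.run(cmd, check=True)  # raises CalledProcessError on a non-zero exit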
| 670 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , *snake_case_ : Optional[Any] , **snake_case_ : List[str] ):
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , snake_case_ , )
super().__init__(*snake_case_ , **snake_case_ )
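A self-contained sketch of the same deprecation-shim pattern; `NewProcessor` and `OldFeatureExtractor` are hypothetical names used purely for illustration.
import warnings

class NewProcessor:
    """Stand-in for the replacement class."""

class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        # Keep the old name importable, warn once, then defer to the replacement.
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like NewProcessor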
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
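A minimal sketch of the slow/fast parity check these tests perform, assuming network access to the `moussaKam/mbarthez` checkpoint.
from transformers import BarthezTokenizer, BarthezTokenizerFast

slow = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
fast = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
text = "I was born in 92000, and this is falsé."
assert slow.tokenize(text) == fast.tokenize(text)  # same subword split
assert slow.encode(text) == fast.encode(text)      # same ids incl. special tokens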
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
if height >= 1:
move_tower(height - 1 , lowerCamelCase , lowerCamelCase , lowerCamelCase )
move_disk(lowerCamelCase , lowerCamelCase )
move_tower(height - 1 , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[str]:
print("""moving disk from""" , lowerCamelCase , """to""" , lowerCamelCase )
def A__ ( ) -> str:
UpperCamelCase_: List[str] = int(input("""Height of hanoi: """ ).strip() )
move_tower(lowerCamelCase , """A""" , """B""" , """C""" )
if __name__ == "__main__":
main()
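A de-obfuscated sketch of the recursion above; the pole-name order follows the standard algorithm (the obfuscation collapses the original parameter names), so treat it as a reconstruction rather than the original signature.
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    # Move height-1 disks out of the way, move the largest disk,
    # then stack the height-1 disks back on top: 2**height - 1 moves total.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        print("moving disk from", from_pole, "to", to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)

move_tower(3, "A", "C", "B")  # prints the 7 moves for three disks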
| 670 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while second != 0:
UpperCamelCase_: Optional[Any] = first & second
first ^= second
UpperCamelCase_: Any = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
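A de-obfuscated sketch of the carry-propagation loop above; note that with Python's unbounded integers it only terminates for non-negative operands.
def add(first: int, second: int) -> int:
    while second != 0:
        carry = first & second   # AND marks the positions that generate a carry
        first ^= second          # XOR adds without carrying
        second = carry << 1      # shift the carry into the next column
    return first

assert add(13, 29) == 42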
| 670 | 1 |
lowerCamelCase_ : Optional[Any] = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
lowerCamelCase_ : Union[str, Any] = {value: key for key, value in encode_dict.items()}
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: List[Any] = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def A__ ( lowerCamelCase ) -> str:
if set(lowerCamelCase ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
UpperCamelCase_: str = """"""
for word in coded.split():
while len(lowerCamelCase ) != 0:
decoded += decode_dict[word[:5]]
UpperCamelCase_: Optional[Any] = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
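A self-contained round trip of the cipher above, restricted to the letters of an assumed plaintext since the obfuscated function names are hard to call directly; the mappings are copied from the encode_dict.
# Letters needed for the assumed plaintext "hello world".
encode_map = {
    "h": "AABBB", "e": "AABAA", "l": "ABABA", "o": "ABBAB",
    "w": "BABAA", "r": "BAAAA", "d": "AAABB", " ": " ",
}
decode_map = {v: k for k, v in encode_map.items() if k != " "}
coded = "".join(encode_map[ch] for ch in "hello world")
decoded = " ".join(
    "".join(decode_map[word[i : i + 5]] for i in range(0, len(word), 5))
    for word in coded.split()
)
assert decoded == "hello world"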
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
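A minimal sketch of the flatten/un-flatten step the preprocessing and collator above share: B examples with 4 endings become B*4 flat pairs for tokenization and are regrouped in blocks of 4; the toy sentences are assumptions.
from itertools import chain

contexts = [["context 0"] * 4, ["context 1"] * 4]      # each context repeated per ending
endings = [[f"context {i} ending {j}" for j in range(4)] for i in range(2)]
first_sentences = list(chain(*contexts))               # 8 flat first sentences
second_sentences = list(chain(*endings))               # 8 flat second sentences
pairs = list(zip(first_sentences, second_sentences))   # what the tokenizer would see
regrouped = [pairs[i : i + 4] for i in range(0, len(pairs), 4)]  # back to (B, 4)
print(len(regrouped), len(regrouped[0]))               # 2 4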
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def A__ ( lowerCamelCase , lowerCamelCase=0 ) -> Optional[Any]:
return sorted(lowerCamelCase , key=lambda lowerCamelCase : x[column] )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=float("""inf""" ) ) -> Optional[int]:
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCamelCase ):
UpperCamelCase_: Any = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCamelCase_: Any = current_dis
return min_dis
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=float("""inf""" ) ) -> Tuple:
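    # following the classic closest-pair strip argument, each point only needs to be
    # compared against a small window of neighbouring strip points (6 here)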
for i in range(min(6 , points_counts - 1 ) , lowerCamelCase ):
for j in range(max(0 , i - 6 ) , lowerCamelCase ):
UpperCamelCase_: Optional[int] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
UpperCamelCase_: Dict = current_dis
return min_dis
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
# base case
if points_counts <= 3:
return dis_between_closest_pair(lowerCamelCase , lowerCamelCase )
# recursion
UpperCamelCase_: int = points_counts // 2
UpperCamelCase_: Optional[Any] = closest_pair_of_points_sqr(
lowerCamelCase , points_sorted_on_y[:mid] , lowerCamelCase )
UpperCamelCase_: str = closest_pair_of_points_sqr(
lowerCamelCase , points_sorted_on_y[mid:] , points_counts - mid )
UpperCamelCase_: Dict = min(lowerCamelCase , lowerCamelCase )
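    # gather the points lying within closest_pair_dis of the dividing line: the true
    # closest pair may straddle the two halves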
UpperCamelCase_: List[str] = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCamelCase )
UpperCamelCase_: Dict = dis_between_closest_in_strip(
lowerCamelCase , len(lowerCamelCase ) , lowerCamelCase )
return min(lowerCamelCase , lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase ) -> List[str]:
UpperCamelCase_: str = column_based_sort(lowerCamelCase , column=0 )
UpperCamelCase_: Tuple = column_based_sort(lowerCamelCase , column=1 )
return (
closest_pair_of_points_sqr(
lowerCamelCase , lowerCamelCase , lowerCamelCase )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase_ : int = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("""Distance:""", closest_pair_of_points(points, len(points)))
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
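        # a few identical source/target lines per split are enough for an end-to-end smoke test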
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
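        # the Ray retriever distributes the retrieval workers itself, so the trainer side still requests a single GPU here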
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowerCamelCase_ : List[str] = Mapping[str, np.ndarray]
lowerCamelCase_ : int = Mapping[str, Any] # Is a nested dict.
lowerCamelCase_ : Optional[Any] = 0.01
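# ProteinNet stores coordinates in picometers; multiplying by 0.01 converts them to angstroms.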
@dataclasses.dataclass(frozen=_A )
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
__UpperCamelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
__UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
__UpperCamelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
__UpperCamelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
__UpperCamelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
__UpperCamelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
__UpperCamelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
__UpperCamelCase : Optional[Sequence[int]] = None
def A__ ( lowerCamelCase ) -> Protein:
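    # a ProteinNet record alternates "[TAG]" header lines with that tag's payload;
    # split on the headers and pair each tag with its lines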
UpperCamelCase_: Optional[Any] = r"""(\[[A-Z]+\]\n)"""
UpperCamelCase_: List[str] = [tag.strip() for tag in re.split(lowerCamelCase , lowerCamelCase ) if len(lowerCamelCase ) > 0]
UpperCamelCase_: Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split("""\n""" ) for l in tags[1::2]] )
UpperCamelCase_: List[str] = ["N", "CA", "C"]
UpperCamelCase_: Dict = None
UpperCamelCase_: int = None
UpperCamelCase_: Union[str, Any] = None
for g in groups:
if "[PRIMARY]" == g[0]:
UpperCamelCase_: int = g[1][0].strip()
for i in range(len(lowerCamelCase ) ):
if seq[i] not in residue_constants.restypes:
UpperCamelCase_: Any = """X""" # FIXME: strings are immutable
UpperCamelCase_: Union[str, Any] = np.array(
[residue_constants.restype_order.get(lowerCamelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
UpperCamelCase_: List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(lowerCamelCase , g[1][axis].split() ) ) )
UpperCamelCase_: List[str] = np.array(lowerCamelCase )
UpperCamelCase_: List[Any] = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(lowerCamelCase ):
UpperCamelCase_: Tuple = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
UpperCamelCase_: Union[str, Any] = np.array(list(map({"""-""": 0, """+""": 1}.get , g[1][0].strip() ) ) )
UpperCamelCase_: Any = np.zeros(
(
len(lowerCamelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(lowerCamelCase ):
UpperCamelCase_: Tuple = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=lowerCamelCase , atom_mask=lowerCamelCase , aatype=lowerCamelCase , residue_index=np.arange(len(lowerCamelCase ) ) , b_factors=lowerCamelCase , )
def A__ ( lowerCamelCase , lowerCamelCase = 0 ) -> List[str]:
UpperCamelCase_: List[str] = []
UpperCamelCase_: List[Any] = prot.remark
if remark is not None:
pdb_headers.append(F'''REMARK {remark}''' )
UpperCamelCase_: Union[str, Any] = prot.parents
UpperCamelCase_: Union[str, Any] = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
UpperCamelCase_: List[Any] = [p for i, p in zip(lowerCamelCase , lowerCamelCase ) if i == chain_id]
if parents is None or len(lowerCamelCase ) == 0:
UpperCamelCase_: List[Any] = ["""N/A"""]
pdb_headers.append(F'''PARENT {" ".join(lowerCamelCase )}''' )
return pdb_headers
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: List[str] = []
UpperCamelCase_: Union[str, Any] = pdb_str.split("""\n""" )
UpperCamelCase_: Optional[int] = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
UpperCamelCase_: List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
UpperCamelCase_: Union[str, Any] = []
if prot.parents_chain_index is not None:
UpperCamelCase_: Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(lowerCamelCase ) , [] )
parent_dict[str(lowerCamelCase )].append(lowerCamelCase )
UpperCamelCase_: Optional[int] = max([int(lowerCamelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
UpperCamelCase_: List[str] = parent_dict.get(str(lowerCamelCase ) , ["""N/A"""] )
parents_per_chain.append(lowerCamelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
UpperCamelCase_: Any = [["""N/A"""]]
def make_parent_line(lowerCamelCase ) -> str:
return F'''PARENT {" ".join(lowerCamelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
UpperCamelCase_: Dict = 0
for i, l in enumerate(lowerCamelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(lowerCamelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(lowerCamelCase ):
UpperCamelCase_: int = parents_per_chain[chain_counter]
else:
UpperCamelCase_: str = ["""N/A"""]
out_pdb_lines.append(make_parent_line(lowerCamelCase ) )
return "\n".join(lowerCamelCase )
def A__ ( lowerCamelCase ) -> str:
UpperCamelCase_: Optional[Any] = residue_constants.restypes + ["""X"""]
def res_atoa(lowerCamelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , """UNK""" )
UpperCamelCase_: List[str] = residue_constants.atom_types
UpperCamelCase_: List[str] = []
UpperCamelCase_: Dict = prot.atom_mask
UpperCamelCase_: Dict = prot.aatype
UpperCamelCase_: Union[str, Any] = prot.atom_positions
UpperCamelCase_: List[Any] = prot.residue_index.astype(np.intaa )
UpperCamelCase_: int = prot.b_factors
UpperCamelCase_: List[Any] = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
UpperCamelCase_: Tuple = get_pdb_headers(lowerCamelCase )
if len(lowerCamelCase ) > 0:
pdb_lines.extend(lowerCamelCase )
UpperCamelCase_: Optional[int] = aatype.shape[0]
UpperCamelCase_: Any = 1
UpperCamelCase_: Optional[int] = 0
UpperCamelCase_: Dict = string.ascii_uppercase
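    # chains are labelled A, B, C, ... in order of their chain index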
UpperCamelCase_: Optional[int] = None
# Add all atom sites.
for i in range(lowerCamelCase ):
UpperCamelCase_: List[Any] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(lowerCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCamelCase_: int = """ATOM"""
UpperCamelCase_: Dict = atom_name if len(lowerCamelCase ) == 4 else F''' {atom_name}'''
UpperCamelCase_: Union[str, Any] = """"""
UpperCamelCase_: Optional[Any] = """"""
UpperCamelCase_: Optional[int] = 1.00
UpperCamelCase_: List[str] = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCamelCase_: Optional[int] = """"""
UpperCamelCase_: Dict = """A"""
if chain_index is not None:
UpperCamelCase_: Optional[int] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCamelCase_: Optional[int] = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(lowerCamelCase )
atom_index += 1
UpperCamelCase_: Optional[int] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCamelCase_: Union[str, Any] = True
UpperCamelCase_: List[Any] = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCamelCase_: Tuple = """TER"""
UpperCamelCase_: str = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(lowerCamelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(lowerCamelCase , lowerCamelCase ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(lowerCamelCase )
def A__ ( lowerCamelCase ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , ) -> Protein:
return Protein(
aatype=features["""aatype"""] , atom_positions=result["""final_atom_positions"""] , atom_mask=result["""final_atom_mask"""] , residue_index=features["""residue_index"""] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ) , chain_index=lowerCamelCase , remark=lowerCamelCase , parents=lowerCamelCase , parents_chain_index=lowerCamelCase , )
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
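        # positions are 1-indexed; if the requested position is past the end, the node is appended at the tail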
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
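        # detach the node by re-linking its neighbours around it, then clear its own pointers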
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase_ : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: list[list[str]] = [[] for _ in range(lowerCamelCase )]
UpperCamelCase_: Optional[Any] = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(lowerCamelCase ) <= key:
return input_string
for position, character in enumerate(lowerCamelCase ):
UpperCamelCase_: List[Any] = position % (lowest * 2) # puts it in bounds
UpperCamelCase_: Dict = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowerCamelCase )
UpperCamelCase_: str = ["""""".join(lowerCamelCase ) for row in temp_grid]
UpperCamelCase_: Dict = """""".join(lowerCamelCase )
return output_string
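# Worked example with the textbook rail-fence parameters (3 rails):
#   encrypt("WEAREDISCOVEREDFLEEATONCE", 3) -> "WECRLTEERDSOEEFEAOCAIVDEN"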
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: int = []
UpperCamelCase_: Dict = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
UpperCamelCase_: list[list[str]] = [[] for _ in range(lowerCamelCase )] # generates template
for position in range(len(lowerCamelCase ) ):
UpperCamelCase_: Optional[int] = position % (lowest * 2) # puts it in bounds
UpperCamelCase_: Optional[int] = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
UpperCamelCase_: List[Any] = 0
for row in temp_grid: # fills in the characters
UpperCamelCase_: Union[str, Any] = input_string[counter : counter + len(lowerCamelCase )]
grid.append(list(lowerCamelCase ) )
counter += len(lowerCamelCase )
UpperCamelCase_: List[str] = """""" # reads as zigzag
for position in range(len(lowerCamelCase ) ):
UpperCamelCase_: List[str] = position % (lowest * 2) # puts it in bounds
UpperCamelCase_: Any = min(lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def A__ ( lowerCamelCase ) -> dict[int, str]:
UpperCamelCase_: Optional[int] = {}
for key_guess in range(1 , len(lowerCamelCase ) ): # tries every key
UpperCamelCase_: Union[str, Any] = decrypt(lowerCamelCase , lowerCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
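        # only a 3x3 corner patch of the last channel is compared against the reference slice below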
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : int = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Optional[Any] = """deformable_detr"""
__UpperCamelCase : List[Any] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : int , snake_case_ : int=True , snake_case_ : Dict=None , snake_case_ : Optional[int]=3 , snake_case_ : List[Any]=300 , snake_case_ : Tuple=1024 , snake_case_ : Tuple=6 , snake_case_ : Union[str, Any]=1024 , snake_case_ : List[Any]=8 , snake_case_ : Dict=6 , snake_case_ : int=1024 , snake_case_ : str=8 , snake_case_ : List[str]=0.0 , snake_case_ : int=True , snake_case_ : Any="relu" , snake_case_ : str=256 , snake_case_ : List[str]=0.1 , snake_case_ : Optional[int]=0.0 , snake_case_ : Optional[Any]=0.0 , snake_case_ : Optional[int]=0.02 , snake_case_ : str=1.0 , snake_case_ : Any=True , snake_case_ : str=False , snake_case_ : List[str]="sine" , snake_case_ : List[Any]="resnet50" , snake_case_ : int=True , snake_case_ : str=False , snake_case_ : Tuple=4 , snake_case_ : Optional[Any]=4 , snake_case_ : Tuple=4 , snake_case_ : Optional[int]=False , snake_case_ : Tuple=300 , snake_case_ : Optional[int]=False , snake_case_ : Dict=1 , snake_case_ : Optional[Any]=5 , snake_case_ : Union[str, Any]=2 , snake_case_ : Optional[int]=1 , snake_case_ : int=1 , snake_case_ : List[Any]=5 , snake_case_ : Optional[int]=2 , snake_case_ : Tuple=0.1 , snake_case_ : Any=0.25 , snake_case_ : Union[str, Any]=False , **snake_case_ : List[str] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase_: str = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: Tuple = backbone_config.get("""model_type""" )
UpperCamelCase_: Dict = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase_: List[Any] = config_class.from_dict(snake_case_ )
UpperCamelCase_: Optional[int] = use_timm_backbone
UpperCamelCase_: Optional[Any] = backbone_config
UpperCamelCase_: str = num_channels
UpperCamelCase_: str = num_queries
UpperCamelCase_: Any = max_position_embeddings
UpperCamelCase_: str = d_model
UpperCamelCase_: Union[str, Any] = encoder_ffn_dim
UpperCamelCase_: int = encoder_layers
UpperCamelCase_: Tuple = encoder_attention_heads
UpperCamelCase_: List[str] = decoder_ffn_dim
UpperCamelCase_: List[Any] = decoder_layers
UpperCamelCase_: Dict = decoder_attention_heads
UpperCamelCase_: Any = dropout
UpperCamelCase_: List[Any] = attention_dropout
UpperCamelCase_: str = activation_dropout
UpperCamelCase_: List[Any] = activation_function
UpperCamelCase_: Tuple = init_std
UpperCamelCase_: Union[str, Any] = init_xavier_std
UpperCamelCase_: Dict = encoder_layerdrop
UpperCamelCase_: List[Any] = auxiliary_loss
UpperCamelCase_: Optional[int] = position_embedding_type
UpperCamelCase_: Dict = backbone
UpperCamelCase_: Optional[int] = use_pretrained_backbone
UpperCamelCase_: List[Any] = dilation
# deformable attributes
UpperCamelCase_: Optional[int] = num_feature_levels
UpperCamelCase_: Dict = encoder_n_points
UpperCamelCase_: str = decoder_n_points
UpperCamelCase_: int = two_stage
UpperCamelCase_: List[Any] = two_stage_num_proposals
UpperCamelCase_: str = with_box_refine
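        # iterative box refinement is a prerequisite for the two-stage variant, hence the check below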
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
UpperCamelCase_: List[Any] = class_cost
UpperCamelCase_: List[Any] = bbox_cost
UpperCamelCase_: List[Any] = giou_cost
# Loss coefficients
UpperCamelCase_: List[str] = mask_loss_coefficient
UpperCamelCase_: Union[str, Any] = dice_loss_coefficient
UpperCamelCase_: int = bbox_loss_coefficient
UpperCamelCase_: str = giou_loss_coefficient
UpperCamelCase_: Any = eos_coefficient
UpperCamelCase_: int = focal_alpha
UpperCamelCase_: str = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case_ , **snake_case_ )
@property
def lowerCAmelCase__ ( self : int ):
return self.encoder_attention_heads
@property
def lowerCAmelCase__ ( self : List[str] ):
return self.d_model
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Dict = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
UpperCamelCase_: List[Any] = self.backbone_config.to_dict()
UpperCamelCase_: List[str] = self.__class__.model_type
return output
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
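    # different_colour_ways_number[n][c] counts tilings of a length-n row using at least one
    # coloured tile of length c + 2 (red/green/blue tiles of lengths 2, 3 and 4, cf. Project Euler 116)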
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
lowerCamelCase_ : Union[str, Any] = """Alexander Joslin"""
import operator as op
from .stack import Stack
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: List[str] = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
UpperCamelCase_: Stack[int] = Stack()
UpperCamelCase_: Stack[str] = Stack()
for i in equation:
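        # NOTE: the equation is consumed one character at a time, so operands are limited to single digits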
if i.isdigit():
# RULE 1
operand_stack.push(int(lowerCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(lowerCamelCase )
elif i == ")":
# RULE 4
UpperCamelCase_: Any = operator_stack.peek()
operator_stack.pop()
UpperCamelCase_: Dict = operand_stack.peek()
operand_stack.pop()
UpperCamelCase_: Any = operand_stack.peek()
operand_stack.pop()
UpperCamelCase_: Optional[int] = operators[opr](lowerCamelCase , lowerCamelCase )
operand_stack.push(lowerCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase_ : str = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
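    # Example invocation (all paths here are placeholders):
    #   python <this_script>.py --tf_checkpoint_path ./t5/model.ckpt \
    #       --config_file ./t5/config.json --pytorch_dump_path ./t5_pytorch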
| 670 | 1 |
def A__ ( lowerCamelCase ) -> list:
UpperCamelCase_: int = len(lowerCamelCase )
for i in range(1 , lowerCamelCase ):
UpperCamelCase_: str = collection[i]
UpperCamelCase_: str = 0
UpperCamelCase_: str = i - 1
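        # binary-search the leftmost index in the sorted prefix collection[:i] where val belongs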
while low <= high:
UpperCamelCase_: Tuple = (low + high) // 2
if val < collection[mid]:
UpperCamelCase_: Union[str, Any] = mid - 1
else:
UpperCamelCase_: Optional[Any] = mid + 1
for j in range(lowerCamelCase , lowerCamelCase , -1 ):
UpperCamelCase_: int = collection[j - 1]
        UpperCamelCase_: Optional[int] = val
return collection
if __name__ == "__main__":
lowerCamelCase_ : Tuple = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase_ : str = [int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
def A__ ( lowerCamelCase ) -> list[int]:
UpperCamelCase_: List[Any] = [0 for i in range(len(lowerCamelCase ) )]
# initialize interval's left pointer and right pointer
UpperCamelCase_, UpperCamelCase_: str = 0, 0
for i in range(1 , len(lowerCamelCase ) ):
# case when current index is inside the interval
if i <= right_pointer:
UpperCamelCase_: Optional[int] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
UpperCamelCase_: Dict = min_edge
while go_next(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
z_result[i] += 1
            # if the new index's result extends the interval further to the right,
            # we have to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
UpperCamelCase_, UpperCamelCase_: List[Any] = i, i + z_result[i] - 1
return z_result
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> bool:
return i + z_result[i] < len(lowerCamelCase ) and s[z_result[i]] == s[i + z_result[i]]
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
UpperCamelCase_: List[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
UpperCamelCase_: Dict = z_function(pattern + input_str )
for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
if val >= len(lowerCamelCase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
    # Find the fourth root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
lowerCamelCase_ : Tuple = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
lowerCamelCase_ : List[str] = """sshleifer/student_marian_en_ro_6_1"""
lowerCamelCase_ : Union[str, Any] = """sshleifer/tiny-mbart"""
@require_torch
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Any=False , snake_case_ : Any=None , snake_case_ : Optional[Any]=True , snake_case_ : Dict=True , snake_case_ : str=True , snake_case_ : Optional[int]=True , ):
UpperCamelCase_: Dict = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=snake_case_ , num_train_epochs=1 , distributed=snake_case_ , extra_args_str=snake_case_ , predict_with_generate=snake_case_ , do_train=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , )
UpperCamelCase_: str = TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
UpperCamelCase_: Optional[Any] = [log for log in logs if """eval_loss""" in log.keys()]
UpperCamelCase_: List[str] = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCamelCase_: Any = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase__ ( self : str ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Dict ):
self.run_seqaseq_quick(distributed=snake_case_ )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : str ):
self.run_seqaseq_quick(distributed=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : List[Any] ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Any ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : str ):
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=snake_case_ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Optional[Any] ):
self.run_seqaseq_quick(
distributed=snake_case_ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=snake_case_ )
@require_apex
@require_torch_gpu
def lowerCAmelCase__ ( self : List[Any] ):
        # XXX: apex breaks the trainer if it's run twice, e.g. when run_seq2seq.main() is called a
        # second time from the same program, and it also breaks other tests that run from the same
        # pytest worker. Until this is sorted out, it must be run only in an external program, i.e.
        # with distributed=True in this test and only under one or more GPUs; if we want a CPU
        # variant we will need to make a special test.
        #
        # Specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
        # a 2nd main() call it botches the subsequent eval.
#
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=snake_case_ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Optional[Any] ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCamelCase_: Optional[Any] = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
UpperCamelCase_: Dict = experiments[experiment_id]
UpperCamelCase_: Any = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
UpperCamelCase_: str = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**snake_case_ , extra_args_str=data["""extra_args_str"""] )
UpperCamelCase_: Tuple = len(re.findall(snake_case_ , cl.err ) )
self.assertEqual(snake_case_ , data["""n_matches"""] )
@slow
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Tuple = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=snake_case_ , learning_rate=3e-4 , num_train_epochs=10 , distributed=snake_case_ , )
# Check metrics
UpperCamelCase_: Tuple = TrainerState.load_from_json(os.path.join(snake_case_ , """trainer_state.json""" ) ).log_history
UpperCamelCase_: str = [log for log in logs if """eval_loss""" in log.keys()]
UpperCamelCase_: int = eval_metrics[0]
UpperCamelCase_: List[Any] = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , snake_case_ )
# test if do_predict saves generations and metrics
UpperCamelCase_: Optional[int] = os.listdir(snake_case_ )
UpperCamelCase_: Union[str, Any] = {os.path.basename(snake_case_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase__ ( self : str ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(snake_case_ : str ) -> Tuple[int, float]:
UpperCamelCase_: Optional[int] = """--skip_memory_metrics 0"""
UpperCamelCase_: Any = self.run_trainer(
max_len=128 , model_name=snake_case_ , learning_rate=3e-4 , num_train_epochs=1 , optim=snake_case_ , distributed=snake_case_ , extra_args_str=snake_case_ , do_eval=snake_case_ , do_predict=snake_case_ , n_gpus_to_use=1 , )
# Check metrics
UpperCamelCase_: str = TrainerState.load_from_json(Path(snake_case_ , """trainer_state.json""" ) ).log_history
UpperCamelCase_: Union[str, Any] = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
UpperCamelCase_: Optional[int] = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
UpperCamelCase_: Union[str, Any] = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[Any] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCamelCase_: Tuple = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCamelCase_: List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCamelCase_: Tuple = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCamelCase_: List[str] = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
            # After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
UpperCamelCase_: Dict = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
snake_case_ , snake_case_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
snake_case_ , snake_case_ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : int , snake_case_ : float = 3e-3 , snake_case_ : str = "adafactor" , snake_case_ : bool = False , snake_case_ : str = None , snake_case_ : int = 0 , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : int = None , ):
UpperCamelCase_: Union[str, Any] = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
UpperCamelCase_: List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
--do_train
--num_train_epochs {str(snake_case_ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(snake_case_ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
UpperCamelCase_: List[Any] = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
            --val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(snake_case_ )}
'''.split()
UpperCamelCase_: int = """
--do_predict
""".split()
UpperCamelCase_: Dict = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCamelCase_: Optional[int] = get_gpu_count()
UpperCamelCase_: Dict = get_torch_dist_unique_port()
UpperCamelCase_: Union[str, Any] = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
UpperCamelCase_: Any = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(snake_case_ , env=self.get_env() )
else:
UpperCamelCase_: Union[str, Any] = ["""run_translation.py"""] + args
with patch.object(snake_case_ , """argv""" , snake_case_ ):
main()
return output_dir
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
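# Illustrative aside (not part of the original file): with the ``_LazyModule`` pattern
# above, importing the package stays cheap, and the heavy framework submodules are
# only imported on first attribute access, e.g.:
#
#     from transformers import DistilBertConfig   # resolved lazily via module __getattr__
#     config = DistilBertConfig(n_layers=6)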
| 670 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowerCamelCase_ : int = logging.get_logger(__name__)
lowerCamelCase_ : str = {"""vocab_file""": """vocab.txt"""}
lowerCamelCase_ : List[Any] = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
lowerCamelCase_ : Tuple = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
lowerCamelCase_ : List[Any] = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Any = VOCAB_FILES_NAMES
__UpperCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[Any] = PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : Optional[Any] = ConvBertTokenizer
def __init__( self : Dict , snake_case_ : str=None , snake_case_ : int=None , snake_case_ : List[Any]=True , snake_case_ : List[Any]="[UNK]" , snake_case_ : List[Any]="[SEP]" , snake_case_ : Optional[Any]="[PAD]" , snake_case_ : List[Any]="[CLS]" , snake_case_ : Any="[MASK]" , snake_case_ : Optional[int]=True , snake_case_ : Dict=None , **snake_case_ : Dict , ):
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
UpperCamelCase_: Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
UpperCamelCase_: int = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
UpperCamelCase_: List[str] = do_lower_case
UpperCamelCase_: Any = strip_accents
UpperCamelCase_: Any = tokenize_chinese_chars
UpperCamelCase_: Tuple = normalizer_class(**snake_case_ )
UpperCamelCase_: Dict = do_lower_case
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Dict=None ):
UpperCamelCase_: Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
UpperCamelCase_: Dict = [self.sep_token_id]
UpperCamelCase_: Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Tuple , snake_case_ : str , snake_case_ : Optional[str] = None ):
UpperCamelCase_: Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
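    # For reference, the standard BERT-style layout the two methods above produce:
    #   single sequence:  [CLS] A [SEP]           token_type_ids: 0 0 ... 0
    #   sequence pair:    [CLS] A [SEP] B [SEP]   token_type_ids: 0 ... 0 1 ... 1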
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=5 ) -> List[Any]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count("""<mask>""" ) == 1
UpperCamelCase_: List[str] = torch.tensor(tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase ) ).unsqueeze(0 ) # Batch size 1
UpperCamelCase_: str = model(lowerCamelCase )[0] # The last hidden-state is the first element of the output tuple
UpperCamelCase_: List[str] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
UpperCamelCase_: List[str] = logits[0, masked_index, :]
UpperCamelCase_: Optional[int] = logits.softmax(dim=0 )
UpperCamelCase_, UpperCamelCase_: str = prob.topk(k=lowerCamelCase , dim=0 )
UpperCamelCase_: Optional[Any] = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowerCamelCase ) )] )
UpperCamelCase_: Optional[int] = tokenizer.mask_token
UpperCamelCase_: str = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
UpperCamelCase_: Union[str, Any] = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(lowerCamelCase ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(lowerCamelCase ) , lowerCamelCase ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowerCamelCase , lowerCamelCase ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowerCamelCase_ : Optional[Any] = CamembertTokenizer.from_pretrained("""camembert-base""")
lowerCamelCase_ : List[Any] = CamembertForMaskedLM.from_pretrained("""camembert-base""")
model.eval()
lowerCamelCase_ : Optional[int] = """Le camembert est <mask> :)"""
print(fill_mask(masked_input, model, tokenizer, topk=3))
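# fill_mask returns up to ``topk`` (filled_sentence, probability, predicted_token)
# tuples; for the prompt above this prints something like
# ("Le camembert est délicieux :)", 0.49, "délicieux") -- the values here are
# illustrative and depend on the released camembert-base weights.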
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
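        # ClapProcessor.model_input_names lists the tokenizer's two names (input_ids,
        # attention_mask) first, followed by the feature extractor's -- hence the
        # [2:] slice in the assertion below.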
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
| 670 | 1 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase_ : List[str] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 6_55_36,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 4_80_00,
"""sample_size""": 13_10_72,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 1_60_00,
"""sample_size""": 6_55_36,
},
}
def A__ ( lowerCamelCase , lowerCamelCase ) -> Any:
    return torch.atan2(lowerCamelCase , lowerCamelCase ) / math.pi * 2
def A__ ( lowerCamelCase ) -> List[str]:
UpperCamelCase_: int = torch.sin(t * math.pi / 2 ) ** 2
UpperCamelCase_: Optional[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase , lowerCamelCase )
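# Numeric sanity check (illustrative; assumes the upstream audio-diffusion convention
# t = torch.atan2(sigma, alpha) / math.pi * 2 for the helper above):
#   at t = 0.5 the crash schedule gives sigma = sin(pi/4)**2 = 0.5,
#   alpha = (1 - 0.25)**0.5 ~= 0.866, and atan2(0.5, 0.866) / pi * 2 ~= 0.333.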
class _UpperCamelCase ( _A ):
'''simple docstring'''
pass
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , snake_case_ : Any ):
super().__init__()
UpperCamelCase_: Dict = DiffusionAttnUnetaD(snake_case_ , n_attn_layers=4 )
UpperCamelCase_: List[str] = deepcopy(self.diffusion )
UpperCamelCase_: int = torch.quasirandom.SobolEngine(1 , scramble=snake_case_ )
def A__ ( lowerCamelCase ) -> int:
UpperCamelCase_: Union[str, Any] = MODELS_MAP[model_name]["""url"""]
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCamelCase_ : str = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
lowerCamelCase_ : List[Any] = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
lowerCamelCase_ : Tuple = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
lowerCamelCase_ : List[Any] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
lowerCamelCase_ : int = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
lowerCamelCase_ : Tuple = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def A__ ( lowerCamelCase ) -> str:
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def A__ ( lowerCamelCase ) -> Dict:
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase ) and not isinstance(lowerCamelCase , lowerCamelCase ):
return name.replace(lowerCamelCase , lowerCamelCase )
elif name.startswith(lowerCamelCase ):
return [name.replace(lowerCamelCase , lowerCamelCase ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def A__ ( lowerCamelCase , lowerCamelCase=13 ) -> Tuple:
UpperCamelCase_: Optional[Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
UpperCamelCase_: str = 0
if string.startswith("""net.3.""" ):
depth += 1
UpperCamelCase_: List[str] = string[6:]
elif string.startswith("""net.""" ):
UpperCamelCase_: int = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
UpperCamelCase_: Dict = string[7:]
if string.startswith("""main.""" ):
UpperCamelCase_: str = string[5:]
# mid block
if string[:2].isdigit():
UpperCamelCase_: List[Any] = string[:2]
UpperCamelCase_: Optional[int] = string[2:]
else:
UpperCamelCase_: Union[str, Any] = string[0]
UpperCamelCase_: List[Any] = string[1:]
if depth == max_depth:
UpperCamelCase_: Tuple = MID_NUM_TO_LAYER[layer_num]
UpperCamelCase_: Optional[int] = """mid_block"""
elif depth > 0 and int(lowerCamelCase ) < 7:
UpperCamelCase_: str = DOWN_NUM_TO_LAYER[layer_num]
UpperCamelCase_: Tuple = F'''down_blocks.{depth}'''
elif depth > 0 and int(lowerCamelCase ) > 7:
UpperCamelCase_: Dict = UP_NUM_TO_LAYER[layer_num]
UpperCamelCase_: List[Any] = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
UpperCamelCase_: List[Any] = DEPTH_0_TO_LAYER[layer_num]
UpperCamelCase_: int = F'''up_blocks.{max_depth - 1}''' if int(lowerCamelCase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
UpperCamelCase_: int = string_left[1:]
if "resnets" in new_layer:
UpperCamelCase_: int = convert_resconv_naming(lowerCamelCase )
elif "attentions" in new_layer:
UpperCamelCase_: str = convert_attn_naming(lowerCamelCase )
UpperCamelCase_: List[Any] = new_string_left
if not isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: int = prefix + """.""" + new_layer + """.""" + string_left
else:
UpperCamelCase_: Tuple = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def A__ ( lowerCamelCase ) -> Optional[int]:
UpperCamelCase_: Tuple = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
UpperCamelCase_: int = rename(lowerCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: int = transform_conv_attns(lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
UpperCamelCase_: Dict = v
return new_state_dict
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> int:
if len(lowerCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
UpperCamelCase_: Any = v[:, :, 0]
else:
# bias
UpperCamelCase_: Any = v
else:
# qkv matrices
UpperCamelCase_: int = v.shape[0]
UpperCamelCase_: Optional[Any] = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
UpperCamelCase_: List[Any] = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
UpperCamelCase_: List[str] = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def A__ ( lowerCamelCase ) -> Tuple:
UpperCamelCase_: Union[str, Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
UpperCamelCase_: str = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
UpperCamelCase_: Any = download(lowerCamelCase )
UpperCamelCase_: List[Any] = MODELS_MAP[model_name]["""sample_rate"""]
UpperCamelCase_: Union[str, Any] = MODELS_MAP[model_name]["""sample_size"""]
UpperCamelCase_: Union[str, Any] = Object()
UpperCamelCase_: str = sample_size
UpperCamelCase_: Optional[int] = sample_rate
UpperCamelCase_: int = 0
UpperCamelCase_: int = UNetaDModel(sample_size=lowerCamelCase , sample_rate=lowerCamelCase )
UpperCamelCase_: Tuple = diffusers_model.state_dict()
UpperCamelCase_: Any = DiffusionUncond(lowerCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase )["""state_dict"""] )
UpperCamelCase_: Union[str, Any] = orig_model.diffusion_ema.eval()
UpperCamelCase_: Dict = orig_model.state_dict()
UpperCamelCase_: Union[str, Any] = rename_orig_weights(lowerCamelCase )
UpperCamelCase_: int = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
UpperCamelCase_: str = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
UpperCamelCase_: Tuple = value.squeeze()
UpperCamelCase_: Optional[int] = value
diffusers_model.load_state_dict(lowerCamelCase )
UpperCamelCase_: str = 1_00
UpperCamelCase_: Optional[Any] = 33
UpperCamelCase_: List[str] = IPNDMScheduler(num_train_timesteps=lowerCamelCase )
UpperCamelCase_: Union[str, Any] = torch.manual_seed(lowerCamelCase )
UpperCamelCase_: List[str] = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase ).to(lowerCamelCase )
UpperCamelCase_: int = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase )[:-1]
UpperCamelCase_: Dict = get_crash_schedule(lowerCamelCase )
UpperCamelCase_: List[str] = DanceDiffusionPipeline(unet=lowerCamelCase , scheduler=lowerCamelCase )
UpperCamelCase_: str = torch.manual_seed(33 )
UpperCamelCase_: Any = pipe(num_inference_steps=lowerCamelCase , generator=lowerCamelCase ).audios
UpperCamelCase_: str = sampling.iplms_sample(lowerCamelCase , lowerCamelCase , lowerCamelCase , {} )
UpperCamelCase_: int = generated.clamp(-1 , 1 )
UpperCamelCase_: str = (generated - audio).abs().sum()
UpperCamelCase_: Dict = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase )
print("""Diff max""" , lowerCamelCase )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCamelCase_ : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCamelCase_ : Dict = parser.parse_args()
main(args)
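# Example invocation (script name and paths are illustrative, not from this file):
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path ./gwf-440k.ckpt --checkpoint_path ./dance-diffusion-gwf-440k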
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
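    # stats.npy packs the mel-spectrogram statistics: row 0 holds the per-bin mean and
    # row 1 the per-bin scale, kept on the model (``mean``/``scale`` buffers upstream)
    # so spectrogram inputs can be de-normalized before vocoding.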
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 670 | 1 |
import math
def A__ ( ) -> None:
UpperCamelCase_: Optional[Any] = input("""Enter message: """ )
UpperCamelCase_: str = int(input(F'''Enter key [2-{len(lowerCamelCase ) - 1}]: ''' ) )
UpperCamelCase_: List[str] = input("""Encryption/Decryption [e/d]: """ )
if mode.lower().startswith("""e""" ):
UpperCamelCase_: Any = encrypt_message(lowerCamelCase , lowerCamelCase )
elif mode.lower().startswith("""d""" ):
UpperCamelCase_: Dict = decrypt_message(lowerCamelCase , lowerCamelCase )
# Append pipe symbol (vertical bar) to identify spaces at the end.
print(F'''Output:\n{text + "|"}''' )
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Dict = [""""""] * key
for col in range(lowerCamelCase ):
UpperCamelCase_: List[str] = col
while pointer < len(lowerCamelCase ):
cipher_text[col] += message[pointer]
pointer += key
return "".join(lowerCamelCase )
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: Any = math.ceil(len(lowerCamelCase ) / key )
UpperCamelCase_: int = key
UpperCamelCase_: Any = (num_cols * num_rows) - len(lowerCamelCase )
UpperCamelCase_: Dict = [""""""] * num_cols
UpperCamelCase_: List[str] = 0
UpperCamelCase_: Dict = 0
for symbol in message:
plain_text[col] += symbol
col += 1
if (
(col == num_cols)
or (col == num_cols - 1)
and (row >= num_rows - num_shaded_boxes)
):
UpperCamelCase_: str = 0
row += 1
return "".join(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
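# Worked example (key = 4): "Hello world" is read out column by column, giving
#   col 0 -> "Hor", col 1 -> "e l", col 2 -> "lwd", col 3 -> "lo"
# so encrypt_message(4, "Hello world") == "Hore llwdlo", and
# decrypt_message(4, "Hore llwdlo") recovers "Hello world".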
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: int = len(lowerCamelCase )
UpperCamelCase_: int = len(lowerCamelCase )
UpperCamelCase_: int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
UpperCamelCase_: list = []
for char_count in range(lowerCamelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(lowerCamelCase )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
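# Characters are interleaved index by index until the shorter string is exhausted,
# so the call above prints "AXBYZ" ("A"/"X", "B"/"Y", then the leftover "Z").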
| 670 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
UpperCamelCase_: str = 0.04
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
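# The per-pixel response above is the standard Harris measure
#   R = det(M) - k * trace(M)**2,  M = [[sum Ix^2, sum Ix*Iy], [sum Ix*Iy, sum Iy^2]]
# accumulated over the window: large positive R marks a corner, negative R an edge,
# and small |R| a flat region -- hence the k in (0.04, 0.06) check in __init__.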
| 670 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
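# Illustrative outputs (the random result is one possibility, shown for shape only):
#   the generator above with (4, 0.5)  -> e.g. {0: [1, 3], 1: [0, 2], 2: [1], 3: [0]}
#   the complete-graph helper with 3   -> {0: [1, 2], 1: [0, 2], 2: [0, 1]}
# In the undirected case each edge appears with probability p, so about
# p * n * (n - 1) / 2 edges are expected on average.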
| 670 | 1 |
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
while a != 0:
UpperCamelCase_, UpperCamelCase_: Dict = b % a, a
return b
def A__ ( lowerCamelCase , lowerCamelCase ) -> int:
if gcd(lowerCamelCase , lowerCamelCase ) != 1:
UpperCamelCase_: str = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(lowerCamelCase )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = 1, 0, a
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Dict = 0, 1, m
while va != 0:
UpperCamelCase_: List[str] = ua // va
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
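# Worked example for the modular-inverse routine above: gcd(7, 26) == 1 and
# 7 * 15 == 105 == 4 * 26 + 1, so the inverse of 7 modulo 26 is 15; asking for the
# inverse of 4 modulo 26 raises ValueError because gcd(4, 26) == 2. The u/v loop
# is the extended Euclidean algorithm.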
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 670 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCamelCase_ : Optional[int] = data_utils.TransfoXLTokenizer
lowerCamelCase_ : Tuple = data_utils.TransfoXLCorpus
lowerCamelCase_ : List[Any] = data_utils
lowerCamelCase_ : Optional[int] = data_utils
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[int]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowerCamelCase , """rb""" ) as fp:
UpperCamelCase_: Optional[Any] = pickle.load(lowerCamelCase , encoding="""latin1""" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase_: List[Any] = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""]
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
UpperCamelCase_: List[str] = corpus.vocab.__dict__
torch.save(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: List[str] = corpus.__dict__
corpus_dict_no_vocab.pop("""vocab""" , lowerCamelCase )
UpperCamelCase_: Optional[Any] = pytorch_dump_folder_path + """/""" + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(lowerCamelCase , lowerCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase_: List[Any] = os.path.abspath(lowerCamelCase )
UpperCamelCase_: Any = os.path.abspath(lowerCamelCase )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase_: Optional[Any] = TransfoXLConfig()
else:
UpperCamelCase_: List[str] = TransfoXLConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Optional[Any] = TransfoXLLMHeadModel(lowerCamelCase )
UpperCamelCase_: int = load_tf_weights_in_transfo_xl(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
UpperCamelCase_: List[Any] = os.path.join(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: List[str] = os.path.join(lowerCamelCase , lowerCamelCase )
print(F'''Save PyTorch model to {os.path.abspath(lowerCamelCase )}''' )
torch.save(model.state_dict() , lowerCamelCase )
print(F'''Save configuration file to {os.path.abspath(lowerCamelCase )}''' )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase_ : str = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 | 1 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[Any] = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: List[str] = Rectangle(height=0.25 , width=0.25 )
UpperCamelCase_: List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: str = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: Optional[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: List[str] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Dict = Text("""CPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: str = [mem.copy() for i in range(4 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.move_to([-1, -1, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[Any] = [mem.copy() for i in range(6 )]
UpperCamelCase_: str = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = Text("""Model""" , font_size=24 )
UpperCamelCase_: str = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.add(snake_case_ )
UpperCamelCase_: List[str] = []
UpperCamelCase_: Optional[int] = []
UpperCamelCase_: List[Any] = []
for i, rect in enumerate(snake_case_ ):
rect.set_stroke(snake_case_ )
UpperCamelCase_: List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=snake_case_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=snake_case_ , buff=0.0 )
self.add(snake_case_ )
model_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ , *snake_case_ )
UpperCamelCase_: Union[str, Any] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = Text("""Loaded Checkpoint""" , font_size=24 )
UpperCamelCase_: List[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: str = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Optional[int] = fill.copy().set_fill(snake_case_ , opacity=0.7 )
target.move_to(snake_case_ )
ckpt_arr.append(snake_case_ )
UpperCamelCase_: List[str] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(snake_case_ )
self.add(*snake_case_ , *snake_case_ )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(snake_case_ , snake_case_ )
UpperCamelCase_: int = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(snake_case_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(snake_case_ )
UpperCamelCase_: List[str] = MarkupText(
f'''Based on the passed-in configuration, weights are stored in\na variety of np.memmaps on disk or on a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
UpperCamelCase_: Any = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_: Union[str, Any] = [meta_mem.copy() for i in range(6 )]
UpperCamelCase_: Any = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: List[str] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: List[str] = Text("""Disk""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(snake_case_ , run_time=3 ) , Write(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) )
UpperCamelCase_: Dict = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Any = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(FadeOut(snake_case_ ) )
UpperCamelCase_: Optional[int] = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=3 ) )
self.play(
FadeOut(snake_case_ , snake_case_ , *snake_case_ , *snake_case_ ) , )
self.wait()
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=sequences, )
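# A minimal round-trip sketch for the tokenizer exercised above. The checkpoint
# name comes from the tests; the sample sentence is an illustrative assumption.
#
# from transformers import BarthezTokenizerFast
# tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
# ids = tok("Le transformeur est un modèle d'apprentissage profond.").input_ids
# text = tok.decode(ids, skip_special_tokens=True)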
| 670 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bars() -> None:
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
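# Typical use of the module above, assuming it is importable as
# `transformers.utils.logging` (the logger name below is an illustrative choice):
#
# from transformers.utils import logging
# logging.set_verbosity_info()
# logger = logging.get_logger("transformers.example")
# logger.info("INFO messages from the library are now visible")
# logging.disable_progress_bars()  # flips the _tqdm_active flag defined above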
| 670 |
def add(first: int, second: int) -> int:
    # XOR sums the bits without carrying; AND picks out the carry bits,
    # which are shifted left and folded back in until no carry remains.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
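# Worked trace of the loop above for add(5, 3):
#   carry = 5 & 3 = 1,  first = 5 ^ 3 = 6,  second = 1 << 1 = 2
#   carry = 6 & 2 = 2,  first = 6 ^ 2 = 4,  second = 2 << 1 = 4
#   carry = 4 & 4 = 4,  first = 4 ^ 4 = 0,  second = 4 << 1 = 8
#   carry = 0 & 8 = 0,  first = 0 ^ 8 = 8,  second = 0 << 1 = 0  -> returns 8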
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 670 | 1 |
from __future__ import annotations


def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
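# Quick sanity values for the functions above (names as restored here):
#   simple_interest(10_000, 0.0005, 30)  -> 150.0   (10_000 * 0.0005 * 30)
#   compound_interest(10_000, 0.05, 3)   -> 1576.25 (10_000 * (1.05**3 - 1))
#   apr_interest(10_000, 0.05, 1)        -> ~512.67 (daily compounding over 365 days)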
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
        UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.int64 )
return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_swag", model_args, data_args)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
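# A hedged example invocation of this script; the model name and hyperparameters
# below are illustrative assumptions, not values taken from the source:
#
# python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3 \
#     --output_dir /tmp/swag_output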
| 670 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_, UpperCamelCase_: Any = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
UpperCamelCase_: Optional[int] = """A painting of a squirrel eating a burger"""
UpperCamelCase_: Union[str, Any] = jax.device_count()
UpperCamelCase_: Tuple = num_samples * [prompt]
UpperCamelCase_: List[str] = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase_: List[str] = replicate(snake_case_ )
UpperCamelCase_: Any = shard(snake_case_ )
UpperCamelCase_: List[Any] = jax.random.PRNGKey(0 )
UpperCamelCase_: Dict = jax.random.split(snake_case_ , jax.device_count() )
UpperCamelCase_: Tuple = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_: List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: Optional[Any] = images[0, 253:256, 253:256, -1]
UpperCamelCase_: List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: str = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = """stabilityai/stable-diffusion-2"""
UpperCamelCase_, UpperCamelCase_: int = FlaxDPMSolverMultistepScheduler.from_pretrained(snake_case_ , subfolder="""scheduler""" )
UpperCamelCase_, UpperCamelCase_: str = FlaxStableDiffusionPipeline.from_pretrained(
        snake_case_ , scheduler=snake_case_ , revision="""bf16""" , dtype=jnp.bfloat16 , )
UpperCamelCase_: List[str] = scheduler_params
UpperCamelCase_: Union[str, Any] = """A painting of a squirrel eating a burger"""
UpperCamelCase_: Optional[int] = jax.device_count()
UpperCamelCase_: List[str] = num_samples * [prompt]
UpperCamelCase_: Any = sd_pipe.prepare_inputs(snake_case_ )
UpperCamelCase_: str = replicate(snake_case_ )
UpperCamelCase_: List[str] = shard(snake_case_ )
UpperCamelCase_: List[Any] = jax.random.PRNGKey(0 )
UpperCamelCase_: List[Any] = jax.random.split(snake_case_ , jax.device_count() )
UpperCamelCase_: Tuple = sd_pipe(snake_case_ , snake_case_ , snake_case_ , num_inference_steps=25 , jit=snake_case_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCamelCase_: List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCamelCase_: int = images[0, 253:256, 253:256, -1]
UpperCamelCase_: Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCamelCase_: Tuple = jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
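# The replicate/shard pattern used by both tests above, in isolation (a minimal
# sketch; `params` and `batch` stand for arbitrary pytrees):
#
# import jax
# from flax.jax_utils import replicate
# from flax.training.common_utils import shard
#
# params = replicate(params)  # copy the parameters to every local device
# batch = shard(batch)        # split the leading batch dimension across devices
# rngs = jax.random.split(jax.random.PRNGKey(0), jax.device_count())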
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
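    # The helper above lays out the dummy dataset as follows (12/2/2 lines per split):
    #   <data_dir>/train.source, <data_dir>/train.target
    #   <data_dir>/val.source,   <data_dir>/val.target
    #   <data_dir>/test.source,  <data_dir>/test.target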
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 670 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead." )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration." )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration." )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}" )
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
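# Typical entry point for the class above (the checkpoint name is an
# illustrative assumption):
#
# from transformers import AutoImageProcessor
# image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# inputs = image_processor(images=image, return_tensors="pt")  # -> pixel_values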
| 670 |
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
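# Quick smoke test for the restored classes above:
#
# >>> lst = LinkedList()
# >>> for value in (1, 2, 3):
# ...     lst.insert(value)
# >>> print(lst)
# 1 2 3
# >>> lst.delete_value(2)
# >>> 2 in lst
# False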
| 670 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
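# Minimal usage sketch for the processor above (the checkpoint name is an
# illustrative assumption; `image` is a PIL image or numpy array):
#
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")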
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 670 | 1 |
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img ):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
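# Note: an equivalent vectorized form for a uint8 image is simply `255 - img`,
# which avoids the per-pixel Python loops entirely.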
if __name__ == "__main__":
    # read original image
    img = imread("""image_data/lena.jpg""", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("""negative of original image""", img)
    waitKey(0)
    destroyAllWindows()
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests( unittest.TestCase ):
    '''simple docstring'''
    @property
    def dummy_uncond_unet( self ):
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
@property
    def dummy_vq_model( self ):
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
        return model
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    def test_inference_uncond( self ):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
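        # Both assertions compare against the same expected values because, for a
        # fixed seed, the dict output (`.images`) and the tuple output
        # (`return_dict=False`) must be numerically identical.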
@slow
@require_torch
class LDMPipelineIntegrationTests( unittest.TestCase ):
    '''simple docstring'''
    def test_inference_uncond( self ):
        ldm = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator , num_inference_steps=5 , output_type="""numpy""" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
        tolerance = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Dict = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 12_80,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_24,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 12_80,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_40,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 14_08,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_60,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 15_36,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_00,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 17_92,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_80,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 20_48,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_56,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 23_04,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_28,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 25_60,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_00,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name ):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["""hidden_dim"""]
    config.width_coefficient = CONFIG_MAP[model_name]["""width_coef"""]
    config.depth_coefficient = CONFIG_MAP[model_name]["""depth_coef"""]
    config.image_size = CONFIG_MAP[model_name]["""image_size"""]
    config.dropout_rate = CONFIG_MAP[model_name]["""dropout_rate"""]
    config.depthwise_padding = CONFIG_MAP[model_name]["""dw_padding"""]
    repo_id = """huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
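# For example, get_efficientnet_config("""b0""") fills in hidden_dim=1280 and
# image_size=224, read straight from CONFIG_MAP above.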
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor(model_name ):
    size = CONFIG_MAP[model_name]["""image_size"""]
    preprocessor = EfficientNetImageProcessor(
        size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys(original_param_names ):
    block_names = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = """efficientnet.""" + item[1]
    # Classification-head source keys below are assumed from the Keras naming scheme:
    key_mapping["""predictions/kernel:0"""] = """classifier.weight"""
    key_mapping["""predictions/bias:0"""] = """classifier.bias"""
    return key_mapping
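# The resulting dict maps TF variable names to HF state-dict keys, e.g.
# """stem_conv/kernel:0""" -> """efficientnet.embeddings.convolution.weight""".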
def replace_params(hf_params , tf_params , key_mapping ):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    original_model = model_classes[model_name](
        include_top=True , weights="""imagenet""" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="""softmax""" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("""Converting parameters...""" )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="""pt""" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["""image_size"""]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print("""Model outputs match!""" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        model_name = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 670 |
def solution(length = 50 ) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
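# Each column of the DP table fixes a single tile length (2, 3 or 4), so the
# final sum counts rows tiled with tiles of one colour only, with at least one
# tile per row - this appears to be the Project Euler 116 setup.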
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
lowerCamelCase_ : List[str] = logging.get_logger(__name__)
class LayoutLMv2FeatureExtractor( LayoutLMv2ImageProcessor ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use LayoutLMv2ImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 670 |
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = T5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 670 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """sew-d"""
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , squeeze_factor=2 , max_position_embeddings=512 , position_buckets=256 , share_att_key=True , relative_attention=True , pos_att_type=("p2c", "c2p") , norm_rel_ebd="layer_norm" , hidden_act="gelu_python" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , initializer_range=0.02 , layer_norm_eps=1e-7 , feature_layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type )
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
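    # With the default strides (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product
    # is 5 * 2**6 = 320, i.e. one logit frame per 320 input samples.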
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_roformer_fast"""] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_roformer"""] = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_roformer"""] = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_roformer"""] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
def hamming(n_element ) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("""a should be a positive number""" )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
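# Example: hamming(5) returns [1, 2, 3, 4, 5]; the pointers i, j and k always
# track the smallest unused multiple of 2, 3 and 5 respectively.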
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    x = symbols(variable )
    func = lambdify(x , function )
    diff_function = lambdify(x , diff(function , x ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("""Could not find root""" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
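# Update rule: x_{n+1} = x_n - m * f(x_n) / f'(x_n). With m = 1 this is the
# classic Newton-Raphson step; m > 1 restores fast convergence on a root of
# multiplicity m.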
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
import os
from math import log10
def solution(data_file = """base_exp.txt""" ) -> int:
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a, x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
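# Comparing x * log10(a) instead of a**x keeps the arithmetic tiny: log10 is
# monotone, so the ordering of the exponentials is preserved without ever
# computing the huge powers themselves.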
if __name__ == "__main__":
print(solution())
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_distilbert_fast"""] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_distilbert"""] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_distilbert"""] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_distilbert"""] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator , batch_size = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""" )
    datasets = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )
    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""] )
    seed = int(config["""seed"""] )
    batch_size = int(config["""batch_size"""] )
    metric = evaluate.load("""glue""" , """mrpc""" )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
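    # e.g. with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this runs 4 accumulation
    # steps of 16 samples each, keeping the effective batch size at 64.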
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""" )
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 670 |
from manim import *
class Stage1( Scene ):  # scene class; name assumed, the original was obfuscated
    '''simple docstring'''
    def construct( self ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
        first_animations = []
        second_animations = []
        cpu_targs = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
| 670 | 1 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
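# Saving and reloading the scheduler state halfway through exercises the
# `state_dict()` round-trip: the recorded learning rates must match a run that
# never serialized (compared in the test class below).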
@require_torch
class OptimizationTest( unittest.TestCase ):
    '''simple docstring'''
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ):
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    '''simple docstring'''
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ):
        common_kwargs = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f'''failed for {scheduler_func} in save and reload''' )
class LambdaScheduleWrapper:
    '''simple docstring'''
    def __init__( self , fn ):
        self.fn = fn
    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )
    @classmethod
    def wrap_scheduler( cls , scheduler ):
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        self.checkpoint = """laion/clap-htsat-unfused"""
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors="""np""" )
        input_processor = processor(audios=raw_speech , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = """This is a test string"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_processor , decoded_tok )
    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 670 | 1 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def A__ ( lowerCamelCase="" ) -> str:
UpperCamelCase_: List[str] = tempfile.mkdtemp()
return os.path.join(lowerCamelCase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests( unittest.TestCase ):
    '''simple docstring'''
    def test_convert_audio( self ):
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )
        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor , torch.tensor(new_tensor ) , atol=1e-4 ) )
    def test_agent_type_from_path( self ):
        tensor = torch.rand(12 , dtype=torch.float32 ) - 0.5
        path = get_new_path(suffix=""".wav""" )
        sf.write(path , tensor , 16000 )
        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor , agent_type.to_raw() , atol=1e-4 ) )
        self.assertEqual(agent_type.to_string() , path )
@require_vision
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Any = torch.randint(0 , 256 , (64, 64, 3) )
UpperCamelCase_: Tuple = AgentImage(snake_case_ )
UpperCamelCase_: Dict = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(snake_case_ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case_ ) )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Any = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
UpperCamelCase_: Dict = Image.open(snake_case_ )
UpperCamelCase_: List[str] = AgentImage(snake_case_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case_ ) )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Optional[int] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
UpperCamelCase_: List[Any] = Image.open(snake_case_ )
UpperCamelCase_: int = AgentImage(snake_case_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(snake_case_ ) )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Optional[Any] = """Hey!"""
UpperCamelCase_: int = AgentText(snake_case_ )
self.assertEqual(snake_case_ , agent_type.to_string() )
self.assertEqual(snake_case_ , agent_type.to_raw() )
self.assertEqual(snake_case_ , snake_case_ )
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
    def __init__( self , args=None , **kwargs ):
        warnings.warn(
            """`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
            """instead.""" , FutureWarning , )
        super().__init__(args=args , **kwargs )
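# Migration sketch: the standard Trainer is a drop-in replacement (the
# argument names below are the usual Trainer ones, shown for illustration):
#
#   from transformers import Trainer
#
#   trainer = Trainer(model=model, args=training_args, train_dataset=train_dataset)
#   trainer.train()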
| 670 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : str = DanceDiffusionPipeline
__UpperCamelCase : Optional[Any] = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__UpperCamelCase : Optional[int] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : List[Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=snake_case_ , use_timestep_embedding=snake_case_ , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase_: Optional[int] = IPNDMScheduler()
UpperCamelCase_: Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] , snake_case_ : int=0 ):
if str(snake_case_ ).startswith("""mps""" ):
UpperCamelCase_: List[Any] = torch.manual_seed(snake_case_ )
else:
UpperCamelCase_: List[Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
UpperCamelCase_: Tuple = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase_: Any = self.get_dummy_components()
UpperCamelCase_: Optional[Any] = DanceDiffusionPipeline(**snake_case_ )
UpperCamelCase_: str = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Union[str, Any] = self.get_dummy_inputs(snake_case_ )
UpperCamelCase_: Union[str, Any] = pipe(**snake_case_ )
UpperCamelCase_: Optional[int] = output.audios
UpperCamelCase_: str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase_: Tuple = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowerCAmelCase__ ( self : Any ):
return super().test_save_load_local()
@skip_mps
def lowerCAmelCase__ ( self : Any ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowerCAmelCase__ ( self : str ):
return super().test_save_load_optional_components()
@skip_mps
def lowerCAmelCase__ ( self : str ):
return super().test_attention_slicing_forward_pass()
def lowerCAmelCase__ ( self : Optional[int] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: Optional[int] = torch_device
UpperCamelCase_: Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[Any] = torch.manual_seed(0 )
UpperCamelCase_: int = pipe(generator=snake_case_ , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_: Optional[int] = output.audios
UpperCamelCase_: Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_: List[Any] = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Tuple = torch_device
UpperCamelCase_: Union[str, Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase_: List[Any] = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: Optional[int] = torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = pipe(generator=snake_case_ , num_inference_steps=100 , audio_length_in_s=4.096 )
UpperCamelCase_: Optional[Any] = output.audios
UpperCamelCase_: Union[str, Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase_: List[Any] = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def load_weights( checkpoint , hf_model , config ) -> None:
    # Weight norm must be applied so that the `weight_g`/`weight_v` parameters
    # exist on the HF model while the original tensors are copied over.
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ) -> None:
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    # The stats file holds the mean/scale used to de-normalize the input spectrograms.
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
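# Example invocation (file names are illustrative):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan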
| 670 | 1 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Optional[int] = logging.getLogger()
def get_results( output_dir ) -> dict:
    results = {}
    path = os.path.join(output_dir , """all_results.json""" )
    if os.path.exists(path ):
        with open(path , """r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'''can\'t find {path}''' )
    return results
lowerCamelCase_ : Dict = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ):
import xla_spawn
UpperCamelCase_: List[str] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: List[str] = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
UpperCamelCase_: Tuple = time()
xla_spawn.main()
UpperCamelCase_: Dict = time()
UpperCamelCase_: List[Any] = get_results(snake_case_ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def lowerCAmelCase__ ( self : Tuple ):
import xla_spawn
UpperCamelCase_: str = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
xla_spawn.main()
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    # Initialise PyTorch model
    config = TaConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
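# Example invocation (paths are illustrative):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch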
| 670 |
import cv2
import numpy as np
class HarrisCorner:
    def __init__( self , k: float , window_size: int ):
        """
        k : empirically determined constant, must be 0.04 or 0.06
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ) -> str:
        return str(self.k )
    def detect( self , img_path: str ):
        img = cv2.imread(img_path , 0 )
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img , cv2.COLOR_GRAY2RGB )
        dy, dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                # Harris response: R = det(M) - k * trace(M)^2
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Threshold can be tuned; larger values keep fewer corners
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("""path_to_image""")
    cv2.imwrite("""detect.png""", color_img)
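# For a cross-check, OpenCV ships an optimized implementation of the same
# response map; a minimal sketch (the image path is illustrative):
#
#   gray = cv2.imread("path_to_image", 0).astype(np.float32)
#   response = cv2.cornerHarris(gray, blockSize=3, ksize=3, k=0.04)
#   corners = np.argwhere(response > 0.01 * response.max())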
| 670 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein ) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14 )
    restype_atom37_to_atom14.append([0] * 37 )
    restype_atom14_mask.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37 , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14 , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein
def make_atom14_masks_np( batch ) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n : torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t : np.array(t ) , make_atom14_masks(batch ) )
    return out
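# Shape summary for the entries added to `protein` (L = number of residues):
#   residx_atom14_to_atom37: [L, 14] -- atom37 index of each atom14 slot
#   residx_atom37_to_atom14: [L, 37] -- atom14 index of each atom37 slot
#   atom14_atom_exists:      [L, 14] -- 1.0 where the residue has that atom
#   atom37_atom_exists:      [L, 37] -- the same mask in the atom37 layout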
| 670 |
import random
def random_graph( vertices_number , probability , directed = False ) -> dict:
    graph: dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j if the randomly
    # generated number is smaller than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph( vertices_number ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
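# Deterministic example:
#
#   >>> complete_graph(3)
#   {0: [1, 2], 1: [0, 2], 2: [0, 1]}
#
# random_graph(4, 0.5) returns one of the 2**6 possible undirected graphs on
# four vertices, since each of the C(4, 2) = 6 candidate edges is kept
# independently with probability 0.5.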
| 670 | 1 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : List[str] = logging.getLogger()
def _dump_articles( path , articles ) -> None:
    content = """\n""".join(articles )
    Path(path ).open("""w""" ).writelines(content )
lowerCamelCase_ : Tuple = """patrickvonplaten/t5-tiny-random"""
lowerCamelCase_ : str = """sshleifer/bart-tiny-random"""
lowerCamelCase_ : List[Any] = """sshleifer/tiny-mbart"""
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
UpperCamelCase_: Optional[int] = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
UpperCamelCase_: Any = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(snake_case_ , snake_case_ )
UpperCamelCase_: Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
UpperCamelCase_: Tuple = """translation_en_to_de""" if model == T5_TINY else """summarization"""
UpperCamelCase_: List[str] = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(snake_case_ , """argv""" , snake_case_ ):
run_generate()
assert Path(snake_case_ ).exists()
# os.remove(Path(output_file_name))
def lowerCAmelCase__ ( self : List[str] ):
self.run_eval_tester(snake_case_ )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : List[Any] ):
self.run_eval_tester(snake_case_ )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCAmelCase__ ( self : Tuple , snake_case_ : Optional[int] ):
UpperCamelCase_: Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
UpperCamelCase_: Any = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
UpperCamelCase_: Tuple = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
UpperCamelCase_: List[Any] = Path(self.get_auto_remove_tmp_dir() )
UpperCamelCase_: Dict = str(tmp_dir / """scores.json""" )
UpperCamelCase_: Any = str(tmp_dir / """val.target""" )
_dump_articles(snake_case_ , text["""en"""] )
_dump_articles(snake_case_ , text["""de"""] )
UpperCamelCase_: List[Any] = """translation_en_to_de""" if model == T5_TINY else """summarization"""
UpperCamelCase_: Tuple = f'''
run_eval_search.py
{model}
{str(snake_case_ )}
{str(snake_case_ )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(snake_case_ , """argv""" , snake_case_ ):
with CaptureStdout() as cs:
run_search()
UpperCamelCase_: Optional[int] = [""" num_beams | length_penalty""", model, """Best score args"""]
UpperCamelCase_: Union[str, Any] = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(snake_case_ )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case_ ).exists()
os.remove(Path(snake_case_ ) )
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 670 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase_ : Any = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""projector.weight"""]
    model.projector.bias.data = downstream_dict["""projector.bias"""]
    model.classifier.weight.data = downstream_dict["""model.post_net.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.post_net.linear.bias"""]
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["""model.linear.weight"""]
    model.classifier.bias.data = downstream_dict["""model.linear.bias"""]
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["""connector.weight"""]
    model.projector.bias.data = downstream_dict["""connector.bias"""]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
    model.feature_extractor.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
    model.classifier.weight.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
    model.classifier.bias.data = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
    model.objective.weight.data = downstream_dict["""objective.W"""]
    return model
@torch.no_grad()
def convert_saprl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ) -> None:
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    downstream_dict = checkpoint["""Downstream"""]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("""ForSequenceClassification""" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForAudioFrameClassification""" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("""ForXVector""" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["""Featurizer"""]["""weights"""]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCamelCase_ : int = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
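# Example invocation (all arguments are illustrative):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream.ckpt \
#       --model_dump_path ./converted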
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid( _outputs ):
    return 1.0 / (1.0 + np.exp(-_outputs ))
def softmax( _outputs ):
    maxes = np.max(_outputs , axis=-1 , keepdims=True )
    shifted_exp = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=True )
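# Worked example (values are approximate):
#   sigmoid(np.array([0.0]))             -> [0.5]
#   softmax(np.array([[1.0, 2.0, 3.0]])) -> [[0.0900, 0.2447, 0.6652]]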
class ClassificationFunction( ExplicitEnum ):
    SIGMOID = """sigmoid"""
    SOFTMAX = """softmax"""
    NONE = """none"""
@add_end_docstrings(
_A , r"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline( Pipeline ):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self : Optional[int] , **snake_case_ : Optional[int] ):
super().__init__(**snake_case_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , return_all_scores=None , function_to_apply=None , top_k="" , **tokenizer_kwargs ):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k , int ) or top_k is None:
            postprocess_params["""top_k"""] = top_k
            postprocess_params["""_legacy"""] = False
        elif return_all_scores is not None:
            warnings.warn(
                """`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"""
                """ `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , UserWarning , )
            if return_all_scores:
                postprocess_params["""top_k"""] = None
            else:
                postprocess_params["""top_k"""] = 1
        if isinstance(function_to_apply , str ):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["""function_to_apply"""] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = """top_k""" not in kwargs
        if isinstance(args[0] , str ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess( self , inputs , **tokenizer_kwargs ):
        return_tensors = self.framework
        if isinstance(inputs , dict ):
            return self.tokenizer(**inputs , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ) and len(inputs ) == 1 and isinstance(inputs[0] , list ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=return_tensors , **tokenizer_kwargs )
        elif isinstance(inputs , list ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                """The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
                """ dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
        return self.tokenizer(inputs , return_tensors=return_tensors , **tokenizer_kwargs )
    def _forward( self , model_inputs ):
        return self.model(**model_inputs )
    def postprocess( self , model_outputs , function_to_apply=None , top_k=1 , _legacy=True ):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["""logits"""][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs )
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'''Unrecognized `function_to_apply` argument: {function_to_apply}''' )
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"""label""": self.model.config.id2label[i], """score""": score.item()} for i, score in enumerate(scores )
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x : x["""score"""] , reverse=True )
        if top_k is not None:
            dict_scores = dict_scores[:top_k]
        return dict_scores
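# Usage sketch (the default checkpoint is chosen by the pipeline factory;
# the score shown is approximate):
#
#   from transformers import pipeline
#
#   classifier = pipeline("text-classification")
#   classifier("I love this!")              # [{"label": "POSITIVE", "score": 0.99...}]
#   classifier("I love this!", top_k=None)  # scores for every label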
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : NestedDataStructureLike[PathLike] , snake_case_ : Optional[NamedSplit] = None , snake_case_ : Optional[Features] = None , snake_case_ : str = None , snake_case_ : bool = False , snake_case_ : bool = False , snake_case_ : Optional[str] = None , snake_case_ : Optional[int] = None , **snake_case_ : Optional[Any] , ):
super().__init__(
snake_case_ , split=snake_case_ , features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , streaming=snake_case_ , num_proc=snake_case_ , **snake_case_ , )
UpperCamelCase_: Optional[int] = field
UpperCamelCase_: int = path_or_paths if isinstance(snake_case_ , snake_case_ ) else {self.split: path_or_paths}
UpperCamelCase_: int = Json(
cache_dir=snake_case_ , data_files=snake_case_ , features=snake_case_ , field=snake_case_ , **snake_case_ , )
def lowerCAmelCase__ ( self : Optional[Any] ):
# Build iterable dataset
if self.streaming:
UpperCamelCase_: List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
UpperCamelCase_: Any = None
UpperCamelCase_: int = None
UpperCamelCase_: Tuple = None
UpperCamelCase_: str = None
self.builder.download_and_prepare(
download_config=snake_case_ , download_mode=snake_case_ , verification_mode=snake_case_ , base_path=snake_case_ , num_proc=self.num_proc , )
UpperCamelCase_: Union[str, Any] = self.builder.as_dataset(
split=self.split , verification_mode=snake_case_ , in_memory=self.keep_in_memory )
return dataset
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Dataset , snake_case_ : Union[PathLike, BinaryIO] , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , **snake_case_ : Tuple , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
UpperCamelCase_: Any = dataset
UpperCamelCase_: int = path_or_buf
UpperCamelCase_: str = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
UpperCamelCase_: List[Any] = num_proc
UpperCamelCase_: List[Any] = """utf-8"""
UpperCamelCase_: Optional[Any] = to_json_kwargs
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: int = self.to_json_kwargs.pop("""path_or_buf""" , snake_case_ )
UpperCamelCase_: Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
UpperCamelCase_: Any = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
UpperCamelCase_: Optional[int] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
UpperCamelCase_: Any = self.to_json_kwargs.pop("""compression""" , snake_case_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=snake_case_ ) as buffer:
UpperCamelCase_: Tuple = self._write(file_obj=snake_case_ , orient=snake_case_ , lines=snake_case_ , index=snake_case_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
""" was passed. Please provide a local path instead.""" )
UpperCamelCase_: List[Any] = self._write(
file_obj=self.path_or_buf , orient=snake_case_ , lines=snake_case_ , index=snake_case_ , **self.to_json_kwargs )
return written
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[str] ):
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = args
UpperCamelCase_: Union[str, Any] = query_table(
table=self.dataset.data , key=slice(snake_case_ , offset + self.batch_size ) , indices=self.dataset._indices , )
UpperCamelCase_: int = batch.to_pandas().to_json(
path_or_buf=snake_case_ , orient=snake_case_ , lines=snake_case_ , index=snake_case_ , **snake_case_ )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase__ ( self : Dict , snake_case_ : BinaryIO , snake_case_ : int , snake_case_ : Tuple , snake_case_ : int , **snake_case_ : str , ):
UpperCamelCase_: List[Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
UpperCamelCase_: Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(snake_case_ )
else:
UpperCamelCase_, UpperCamelCase_: int = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , snake_case_ , snake_case_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(snake_case_ )
return written
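# Usage sketch (the writer class above corresponds to datasets'
# `JsonDatasetWriter`; the dataset content is illustrative):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"text": ["a", "b", "c"]})
#   JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()  # one JSON object per line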
| 670 |
def add( first , second ) -> int:
    while second != 0:
        # c holds the carry bits common to first and second
        c = first & second
        # XOR sums the bits where no carry is produced
        first ^= second
        # shift the carry left so it is added in the next iteration
        second = c << 1
    return first
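# Worked example: add(5, 3) -> 8
#   first=0b101, second=0b011: c=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010: c=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100: c=0b100, first=0b000, second=0b1000
#   first=0b000, second=0b1000: c=0, first=0b1000, second=0 -> returns 8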
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = int(input("""Enter the first number: """).strip())
lowerCamelCase_ : Tuple = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 670 | 1 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase_ : Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample( wav , max_length , sample_rate = 16_000 ):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length ) )
    if len(wav ) <= sample_length:
        return wav
    random_offset = randint(0 , len(wav ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]
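# e.g. random_subsample(np.zeros(48_000), max_length=2.0) returns a random
# 32_000-sample window (2 s at the default 16 kHz rate) of the input.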
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """Name of a dataset from the datasets package"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """A file containing the training audio paths and labels."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
__UpperCamelCase : str = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
__UpperCamelCase : str = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
__UpperCamelCase : str = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
__UpperCamelCase : str = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : float = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Name or path of preprocessor config."""} )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
__UpperCamelCase : Optional[bool] = field(
default=_A , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """will be removed in a future version. Use `--freeze_feature_encoder` """
                """instead. Setting `freeze_feature_encoder==True`.""" , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                """The argument `--freeze_feature_extractor` is deprecated and """
                """should not be used in combination with `--freeze_feature_encoder`. """
                """Only make use of `--freeze_feature_encoder`.""" )
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_audio_classification""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: int = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase_: Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase_: Union[str, Any] = DatasetDict()
UpperCamelCase_: Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--audio_column_name` to the correct audio column - one of """
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"""Make sure to set `--label_column_name` to the correct text column - one of """
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase_: Optional[Any] = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase_: Any = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase_: Dict = feature_extractor.model_input_names[0]
def train_transforms(lowerCamelCase ):
UpperCamelCase_: List[Any] = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase_: Any = random_subsample(
audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = feature_extractor(lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase_: Tuple = {model_input_name: inputs.get(lowerCamelCase )}
UpperCamelCase_: Union[str, Any] = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(lowerCamelCase ):
UpperCamelCase_: Any = [audio["""array"""] for audio in batch[data_args.audio_column_name]]
UpperCamelCase_: List[Any] = feature_extractor(lowerCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase_: Union[str, Any] = {model_input_name: inputs.get(lowerCamelCase )}
UpperCamelCase_: Dict = list(batch[data_args.label_column_name] )
return output_batch
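    # Note (illustrative): both transforms above are applied lazily through
    # `set_transform` further below, so e.g. raw_datasets["train"][0] would run
    # `train_transforms` on the fly: random cropping happens only on the
    # training split, while validation clips are fed through at full length.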
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase_: List[str] = raw_datasets["""train"""].features[data_args.label_column_name].names
UpperCamelCase_, UpperCamelCase_: Any = {}, {}
for i, label in enumerate(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = str(lowerCamelCase )
UpperCamelCase_: List[str] = label
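    # For instance (illustrative), with labels ["dog", "laughter"] the loop
    # above builds labelaid == {"dog": "0", "laughter": "1"} and
    # idalabel == {"0": "dog", "1": "laughter"}; both go into the model config
    # below so the Inference API can display readable class names.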
# Load the accuracy metric from the datasets package
UpperCamelCase_: Optional[int] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(lowerCamelCase ):
UpperCamelCase_: str = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=lowerCamelCase , references=eval_pred.label_ids )
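    # Illustrative: for logits of shape (num_examples, num_labels), the argmax
    # above yields one predicted class id per clip, and `metric.compute` returns
    # a dict such as {"accuracy": 0.87} (value made up for illustration).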
UpperCamelCase_: Dict = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(lowerCamelCase ) , labelaid=lowerCamelCase , idalabel=lowerCamelCase , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Dict = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase_: List[Any] = (
raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(lowerCamelCase , output_all_columns=lowerCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase_: List[str] = (
raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(lowerCamelCase , output_all_columns=lowerCamelCase )
# Initialize our trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=lowerCamelCase , tokenizer=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: Any = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: Dict = last_checkpoint
UpperCamelCase_: Union[str, Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase_: Tuple = trainer.evaluate()
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """audio-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""audio-classification"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
if __name__ == "__main__":
main()
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.intaa )
return batch
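# Shape walk-through for the collator above (illustrative): with batch_size=2
# and num_choices=4, `flattened_features` holds 8 encodings; after padding to a
# common length L, every tensor is reshaped to (2, 4, L) and `labels` becomes a
# (2,) int64 tensor.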
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                """Use --overwrite_output_dir to train from scratch.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
UpperCamelCase_: int = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
UpperCamelCase_: Optional[Any] = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCamelCase_: Union[str, Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
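    # Shape sketch for the function above (illustrative): a batch of B examples
    # yields B * 4 (context, ending) pairs after flattening; the tokenizer
    # encodes them flat, and the final comprehension regroups every consecutive
    # 4 encodings so that tokenized["input_ids"][b] holds the four candidate
    # sequences of example b.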
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
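    # e.g. (illustrative): predictions of shape (N, 4) reduce via argmax to N
    # chosen endings; with label_ids == [1, 3] and argmax == [1, 2] the
    # returned accuracy would be 0.5.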
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 670 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Any = MvpTokenizer
__UpperCamelCase : Union[str, Any] = MvpTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = filter_roberta_detectors
def lowerCAmelCase__ ( self : Tuple ):
super().setUp()
UpperCamelCase_: Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCamelCase_: Optional[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCamelCase_: Tuple = {"""unk_token""": """<unk>"""}
UpperCamelCase_: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase_: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(snake_case_ ) )
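    # The fixture written above is a tiny byte-level BPE vocabulary: the 20
    # tokens map to ids 0..19 and the merge rules (e.g. "\u0120 l" -> "\u0120l")
    # are just enough to round-trip the "lower newer" sample used in these
    # tests. (Illustrative note, not from the original file.)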
def lowerCAmelCase__ ( self : Optional[Any] , **snake_case_ : Optional[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Optional[int] ):
return "lower newer", "lower newer"
@cached_property
def lowerCAmelCase__ ( self : Optional[int] ):
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCAmelCase__ ( self : Dict ):
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Tuple = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCamelCase_: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
# Test that special tokens are reset
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Any = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""" , snake_case_ )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertNotIn("""labels""" , snake_case_ )
self.assertNotIn("""decoder_attention_mask""" , snake_case_ )
@require_torch
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Any = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Union[str, Any] = tokenizer(text_target=snake_case_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: str = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = ["""A long paragraph for summarization."""]
UpperCamelCase_: List[str] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCamelCase_: Tuple = tokenizer(snake_case_ , text_target=snake_case_ , return_tensors="""pt""" )
UpperCamelCase_: int = inputs["""input_ids"""]
UpperCamelCase_: Union[str, Any] = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCAmelCase__ ( self : Optional[int] ):
pass
def lowerCAmelCase__ ( self : Dict ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCamelCase_: Dict = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
UpperCamelCase_: Tuple = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
UpperCamelCase_: List[str] = """A, <mask> AllenNLP sentence."""
UpperCamelCase_: str = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
UpperCamelCase_: Dict = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCamelCase_: List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCamelCase_: int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
        os.makedirs(snake_case_ , exist_ok=True )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
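    # Resulting layout (illustrative): data_dir/{train,val,test}.{source,target},
    # e.g. train.source holds 12 copies of "What is love ?" and train.target 12
    # copies of "life", matching the n_lines split sizes above.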
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase_ : Dict = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def lowerCAmelCase__ ( cls : str ):
UpperCamelCase_: Any = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase_: Dict = FlaxBertModel(snake_case_ )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
UpperCamelCase_: Dict = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(snake_case_ , repo_id="""test-model-flax""" , push_to_hub=snake_case_ , use_auth_token=self._token )
UpperCamelCase_: Tuple = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
UpperCamelCase_: int = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCamelCase_: Dict = FlaxBertModel(snake_case_ )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
UpperCamelCase_: Any = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCamelCase_: List[Any] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
snake_case_ , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=snake_case_ , use_auth_token=self._token )
UpperCamelCase_: str = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
UpperCamelCase_: List[str] = flatten_dict(unfreeze(model.params ) )
UpperCamelCase_: Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCamelCase_: Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(snake_case_ , 1e-3 , msg=f'''{key} not identical''' )
def check_models_equal(model_a, model_b) -> bool:
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params)
    flat_params_b = flatten_dict(model_b.params)
    for key in flat_params_a.keys():
        # Any leaf differing by more than 1e-4 in summed absolute value counts
        # as a mismatch.
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
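# Illustrative usage of the helper above: check_models_equal(model, new_model)
# returns False as soon as any flattened parameter leaf differs by more than
# 1e-4 in summed absolute value, and True otherwise.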
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase_: int = FlaxBertModel(snake_case_ )
UpperCamelCase_: Any = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case_ , snake_case_ ) )
with self.assertRaises(snake_case_ ):
UpperCamelCase_: List[Any] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: Any = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertTrue(check_models_equal(snake_case_ , snake_case_ ) )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
UpperCamelCase_: Tuple = FlaxBertModel(snake_case_ )
UpperCamelCase_: Tuple = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(snake_case_ , snake_case_ ) , max_shard_size="""10KB""" )
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Optional[int] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: List[str] = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertTrue(check_models_equal(snake_case_ , snake_case_ ) )
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: List[str] = """bert"""
UpperCamelCase_: Optional[int] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(snake_case_ ):
UpperCamelCase_: int = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: List[str] = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[Any] = """bert"""
UpperCamelCase_: str = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(snake_case_ ):
UpperCamelCase_: Optional[int] = FlaxBertModel.from_pretrained(snake_case_ )
UpperCamelCase_: int = FlaxBertModel.from_pretrained(snake_case_ , subfolder=snake_case_ )
self.assertIsNotNone(snake_case_ )
| 670 |
class Node:
    """A node of a doubly linked list."""

    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    """Iterator that walks a linked list from head to tail."""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    """A doubly linked list with head and tail pointers."""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
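# Minimal usage sketch for the list above (illustrative):
#
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)                            # head=1, tail=3
#   assert str(linked_list) == "1 2 3"
#   linked_list.insert_at_position(position=2, value=9)      # -> "1 9 2 3"
#   linked_list.delete_value(9)                              # -> "1 2 3"
#   assert 2 in linked_list and not linked_list.is_empty()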
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
    """The `image_to_image.py` script is outdated. Please use `from diffusers import"""
    """ StableDiffusionImg2ImgPipeline` instead."""
)
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
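# Note (illustrative): with the `_LazyModule` above, an import such as
# `from transformers.models.mgp_str import MgpstrProcessor` only materialises
# `processing_mgp_str` on first attribute access; none of the submodules are
# imported eagerly at package-import time.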
| 670 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Dict = logging.get_logger(__name__)
def A__ ( lowerCamelCase , lowerCamelCase=False ) -> List[str]:
UpperCamelCase_: Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
UpperCamelCase_: int = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
UpperCamelCase_: str = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCamelCase_: List[Any] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCamelCase_: Any = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(lowerCamelCase )-1}''' )
if "norm" in key:
UpperCamelCase_: List[Any] = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCamelCase_: Optional[Any] = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
UpperCamelCase_: Dict = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(lowerCamelCase )-1}''' )
if "layer_norm1" in key:
UpperCamelCase_: Tuple = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCamelCase_: List[Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCamelCase_: List[str] = key[key.find("""block""" ) + len("""block""" )]
UpperCamelCase_: Optional[Any] = key.replace(F'''block{idx}''' , F'''block.{int(lowerCamelCase )-1}''' )
if "attn.q" in key:
UpperCamelCase_: int = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCamelCase_: int = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCamelCase_: List[Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCamelCase_: Dict = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCamelCase_: Any = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCamelCase_: Any = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCamelCase_: str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCamelCase_: Tuple = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCamelCase_: List[str] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCamelCase_: Any = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(lowerCamelCase )-1}''' )
if key.startswith("""head""" ):
UpperCamelCase_: Union[str, Any] = key.replace("""head""" , """classifier""" )
UpperCamelCase_: Optional[int] = value
return new_state_dict
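# Illustrative example of the renaming above for a full-model checkpoint
# (encoder_only=False): an original key such as
#   "backbone.block1.0.attn.q.weight"
# is rewritten step by step to
#   "segformer.encoder.block.0.0.attention.self.query.weight".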
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCamelCase_: Any = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
UpperCamelCase_: Optional[int] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
UpperCamelCase_: Optional[Any] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCamelCase_: int = kv_bias[: config.hidden_sizes[i]]
UpperCamelCase_: str = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCamelCase_: Any = kv_bias[
config.hidden_sizes[i] :
]
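# Shape sketch for the split above (illustrative): the original checkpoint
# fuses the key/value projections of stage i into one
# (2 * hidden_sizes[i], hidden_sizes[i]) `kv` matrix; the first
# hidden_sizes[i] rows become the key projection and the remaining rows the
# value projection, with the bias split the same way.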
def prepare_img():
    # The standard COCO image of two cats used across conversion scripts.
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
UpperCamelCase_: Tuple = SegformerConfig()
UpperCamelCase_: Optional[int] = False
# set attributes based on model_name
UpperCamelCase_: List[str] = """huggingface/label-files"""
if "segformer" in model_name:
UpperCamelCase_: int = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
UpperCamelCase_: Optional[Any] = 1_50
UpperCamelCase_: str = """ade20k-id2label.json"""
UpperCamelCase_: Any = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
UpperCamelCase_: Union[str, Any] = 19
UpperCamelCase_: int = """cityscapes-id2label.json"""
UpperCamelCase_: int = (1, 19, 1_28, 1_28)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
UpperCamelCase_: int = True
UpperCamelCase_: Union[str, Any] = model_name[4:6]
UpperCamelCase_: Optional[int] = 10_00
UpperCamelCase_: Tuple = """imagenet-1k-id2label.json"""
UpperCamelCase_: List[str] = (1, 10_00)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
UpperCamelCase_: List[Any] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase_: List[str] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
UpperCamelCase_: Union[str, Any] = idalabel
UpperCamelCase_: List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCamelCase_: Any = [64, 1_28, 3_20, 5_12]
UpperCamelCase_: List[str] = 2_56
elif size == "b2":
UpperCamelCase_: Union[str, Any] = [64, 1_28, 3_20, 5_12]
UpperCamelCase_: int = 7_68
UpperCamelCase_: Any = [3, 4, 6, 3]
elif size == "b3":
UpperCamelCase_: Tuple = [64, 1_28, 3_20, 5_12]
UpperCamelCase_: Union[str, Any] = 7_68
UpperCamelCase_: Union[str, Any] = [3, 4, 18, 3]
elif size == "b4":
UpperCamelCase_: Optional[Any] = [64, 1_28, 3_20, 5_12]
UpperCamelCase_: Union[str, Any] = 7_68
UpperCamelCase_: Union[str, Any] = [3, 8, 27, 3]
elif size == "b5":
UpperCamelCase_: str = [64, 1_28, 3_20, 5_12]
UpperCamelCase_: str = 7_68
UpperCamelCase_: Union[str, Any] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
UpperCamelCase_: str = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowerCamelCase , align=lowerCamelCase , do_random_crop=lowerCamelCase )
# prepare image
UpperCamelCase_: int = prepare_img()
UpperCamelCase_: Optional[Any] = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
UpperCamelCase_: Tuple = torch.load(lowerCamelCase , map_location=torch.device("""cpu""" ) )
else:
UpperCamelCase_: str = torch.load(lowerCamelCase , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
UpperCamelCase_: List[Any] = rename_keys(lowerCamelCase , encoder_only=lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowerCamelCase , lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
UpperCamelCase_: Optional[int] = False
UpperCamelCase_: Optional[Any] = SegformerForImageClassification(lowerCamelCase )
else:
UpperCamelCase_: Tuple = SegformerForSemanticSegmentation(lowerCamelCase )
model.load_state_dict(lowerCamelCase )
model.eval()
# forward pass
UpperCamelCase_: Any = model(lowerCamelCase )
UpperCamelCase_: Any = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCamelCase_: Dict = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCamelCase_: Union[str, Any] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCamelCase_: str = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCamelCase_: Optional[Any] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCamelCase_: List[str] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCamelCase_: Optional[int] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCamelCase_: Optional[Any] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCamelCase_: int = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCamelCase_: Dict = torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCamelCase_: List[Any] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCamelCase_: Any = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCamelCase_: Any = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCamelCase_: Optional[Any] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCamelCase_: Optional[Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCamelCase_: Any = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
UpperCamelCase_: Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
model.save_pretrained(lowerCamelCase )
image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase_ : str = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
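# Minimal standalone sketch (added for illustration, not part of the original script)
# of the block-index renaming applied above: original checkpoints number blocks from 1
# ("block1"), while the HF model indexes modules from 0 ("block.0"). The sample key
# below is illustrative, not taken from a real checkpoint.
def _demo_rename_block(key: str) -> str:
    if "block" in key:
        idx = key[key.find("block") + len("block")]  # digit right after the prefix
        key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
    return key
assert _demo_rename_block("segformer.encoder.block2.attn.q") == "segformer.encoder.block.1.attn.q"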
| 670 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self : int ):
torch.manual_seed(0 )
UpperCamelCase_: Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
UpperCamelCase_: Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase__ ( self : Any ):
torch.manual_seed(0 )
UpperCamelCase_: List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Union[str, Any] = self.dummy_uncond_unet
UpperCamelCase_: Optional[Any] = DDIMScheduler()
UpperCamelCase_: List[str] = self.dummy_vq_model
UpperCamelCase_: List[Any] = LDMPipeline(unet=snake_case_ , vqvae=snake_case_ , scheduler=snake_case_ )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: str = torch.manual_seed(0 )
UpperCamelCase_: int = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" ).images
UpperCamelCase_: Dict = torch.manual_seed(0 )
UpperCamelCase_: str = ldm(generator=snake_case_ , num_inference_steps=2 , output_type="""numpy""" , return_dict=snake_case_ )[0]
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase_: Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase_: str = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
UpperCamelCase_: Optional[Any] = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: Dict = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(snake_case_ )
ldm.set_progress_bar_config(disable=snake_case_ )
UpperCamelCase_: List[str] = torch.manual_seed(0 )
UpperCamelCase_: Optional[int] = ldm(generator=snake_case_ , num_inference_steps=5 , output_type="""numpy""" ).images
UpperCamelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase_: List[str] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
UpperCamelCase_: Dict = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 670 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def A__ ( lowerCamelCase ) -> Optional[Any]:
def decorator(lowerCamelCase ):
UpperCamelCase_: List[Any] = getattr(lowerCamelCase , """handle_key""" , [] )
handle += [key]
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
def A__ ( *lowerCamelCase ) -> List[Any]:
def decorator(lowerCamelCase ):
UpperCamelCase_: Any = getattr(lowerCamelCase , """handle_key""" , [] )
handle += keys
setattr(lowerCamelCase , """handle_key""" , lowerCamelCase )
return func
return decorator
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __new__( cls : int , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Union[str, Any] ):
UpperCamelCase_: Tuple = super().__new__(cls , snake_case_ , snake_case_ , snake_case_ )
if not hasattr(snake_case_ , """key_handler""" ):
setattr(snake_case_ , """key_handler""" , {} )
setattr(snake_case_ , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
UpperCamelCase_: str = getattr(snake_case_ , """handle_key""" , [] )
for key in handled_keys:
UpperCamelCase_: Any = value
return new_cls
@staticmethod
def lowerCAmelCase__ ( cls : int ):
UpperCamelCase_: Union[str, Any] = get_character()
if char != KEYMAP["undefined"]:
UpperCamelCase_: Any = ord(snake_case_ )
UpperCamelCase_: List[str] = cls.key_handler.get(snake_case_ )
if handler:
UpperCamelCase_: List[Any] = char
return handler(cls )
else:
return None
def A__ ( cls ) -> List[Any]:
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
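# Self-contained sketch of the decorator pattern above (added; a toy version, not the
# real KeyHandler): handlers are tagged with the keys they service and a tiny
# dispatcher routes a pressed character to the matching method.
def _on_key(*keys):
    def decorator(func):
        func.handle_key = getattr(func, "handle_key", []) + list(keys)
        return func
    return decorator
class _DemoMenu:
    @_on_key("j")
    def move_down(self):
        return "down"
    @_on_key("k")
    def move_up(self):
        return "up"
    def dispatch(self, char):
        # scan class attributes for a handler tagged with this key
        for attr in vars(type(self)).values():
            if char in getattr(attr, "handle_key", []):
                return attr(self)
        return None
assert _DemoMenu().dispatch("j") == "down"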
| 670 |
def A__ ( lowerCamelCase = 50 ) -> int:
UpperCamelCase_: List[Any] = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 670 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[Any] = """encoder-decoder"""
__UpperCamelCase : Dict = True
def __init__( self : Tuple , **snake_case_ : Tuple ):
super().__init__(**snake_case_ )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
UpperCamelCase_: List[Any] = kwargs.pop("""encoder""" )
UpperCamelCase_: Union[str, Any] = encoder_config.pop("""model_type""" )
UpperCamelCase_: Optional[int] = kwargs.pop("""decoder""" )
UpperCamelCase_: str = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
UpperCamelCase_: List[Any] = AutoConfig.for_model(snake_case_ , **snake_case_ )
UpperCamelCase_: List[Any] = AutoConfig.for_model(snake_case_ , **snake_case_ )
UpperCamelCase_: Dict = True
@classmethod
def lowerCAmelCase__ ( cls : Any , snake_case_ : PretrainedConfig , snake_case_ : PretrainedConfig , **snake_case_ : Optional[Any] ):
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
UpperCamelCase_: List[str] = True
UpperCamelCase_: str = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: int = copy.deepcopy(self.__dict__ )
UpperCamelCase_: Optional[Any] = self.encoder.to_dict()
UpperCamelCase_: Optional[int] = self.decoder.to_dict()
UpperCamelCase_: List[str] = self.__class__.model_type
return output
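# Hedged usage sketch (added): composing a composite config from two sub-configs via
# the released transformers API; "bert-base-uncased" is only an illustrative checkpoint.
if __name__ == "__main__":
    from transformers import AutoConfig, EncoderDecoderConfig
    enc = AutoConfig.from_pretrained("bert-base-uncased")
    dec = AutoConfig.from_pretrained("bert-base-uncased")
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
    print(cfg.decoder.is_decoder, cfg.decoder.add_cross_attention)  # True True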
| 670 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Union[str, Any]:
# Initialise PyTorch model
UpperCamelCase_: List[Any] = TaConfig.from_json_file(lowerCamelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Any = TaForConditionalGeneration(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase_ : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
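# Example invocation (added; the script filename and all paths are placeholders):
#
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/t5/model.ckpt \
#         --config_file /path/to/config.json \
#         --pytorch_dump_path /path/to/pytorch_dump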
| 670 | 1 |
import math
import random
def A__ ( lowerCamelCase , lowerCamelCase = False ) -> float:
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCamelCase_ : Any = 0.02
def A__ ( lowerCamelCase , lowerCamelCase ) -> float:
UpperCamelCase_: Tuple = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(lowerCamelCase ):
# Forward propagation
UpperCamelCase_: Optional[int] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
UpperCamelCase_: Optional[int] = (expected / 1_00) - layer_a
# Error delta
UpperCamelCase_: Any = layer_1_error * sigmoid_function(lowerCamelCase , lowerCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : Dict = int(input("""Expected value: """))
lowerCamelCase_ : str = int(input("""Number of propagations: """))
print(forward_propagation(expected, number_propagations))
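# Quick numerical sanity check (added, self-contained): the closed-form sigmoid
# derivative s * (1 - s) used in the weight update should agree with a central
# finite-difference estimate.
def _check_sigmoid_derivative(x: float = 0.3, h: float = 1e-6) -> None:
    s = 1 / (1 + math.exp(-x))
    analytic = s * (1 - s)
    numeric = (1 / (1 + math.exp(-(x + h))) - 1 / (1 + math.exp(-(x - h)))) / (2 * h)
    assert abs(analytic - numeric) < 1e-6, (analytic, numeric)
_check_sigmoid_derivative()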
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : str = {
"""configuration_roformer""": ["""ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoFormerConfig""", """RoFormerOnnxConfig"""],
"""tokenization_roformer""": ["""RoFormerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""RoFormerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoFormerForCausalLM""",
"""RoFormerForMaskedLM""",
"""RoFormerForMultipleChoice""",
"""RoFormerForQuestionAnswering""",
"""RoFormerForSequenceClassification""",
"""RoFormerForTokenClassification""",
"""RoFormerLayer""",
"""RoFormerModel""",
"""RoFormerPreTrainedModel""",
"""load_tf_weights_in_roformer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRoFormerForCausalLM""",
"""TFRoFormerForMaskedLM""",
"""TFRoFormerForMultipleChoice""",
"""TFRoFormerForQuestionAnswering""",
"""TFRoFormerForSequenceClassification""",
"""TFRoFormerForTokenClassification""",
"""TFRoFormerLayer""",
"""TFRoFormerModel""",
"""TFRoFormerPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxRoFormerForMaskedLM""",
"""FlaxRoFormerForMultipleChoice""",
"""FlaxRoFormerForQuestionAnswering""",
"""FlaxRoFormerForSequenceClassification""",
"""FlaxRoFormerForTokenClassification""",
"""FlaxRoFormerModel""",
"""FlaxRoFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
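# Minimal sketch of the lazy-import idea (added; a toy version, not the real
# _LazyModule, and shown as a comment because the module object above is already
# replaced): a module-level __getattr__ (PEP 562) can resolve heavy names only on
# first attribute access instead of at import time.
#
#     import importlib
#     _LAZY = {"RoFormerConfig": ".configuration_roformer"}
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __package__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")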
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : str = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = "x" , lowerCamelCase = 10**-10 , lowerCamelCase = 1 , ) -> complex:
UpperCamelCase_: Optional[Any] = symbols(lowerCamelCase )
UpperCamelCase_: int = lambdify(lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Optional[Any] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase ) )
UpperCamelCase_: Tuple = starting_point
while True:
if diff_function(lowerCamelCase ) != 0:
UpperCamelCase_: List[Any] = prev_guess - multiplicity * func(lowerCamelCase ) / diff_function(
lowerCamelCase )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
UpperCamelCase_: Any = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 670 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase_ : str = {
"""/attention/""": """/0/SelfAttention/""",
"""/self_attention/""": """/0/SelfAttention/""",
"""/encoder_decoder_attention/""": """/1/EncDecAttention/""",
"""value""": """v""",
"""query""": """q""",
"""key""": """k""",
"""out""": """o""",
"""pre_self_attention_layer_norm""": """0/layer_norm""",
"""pre_cross_attention_layer_norm""": """1/layer_norm""",
"""pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong
"""token_embedder""": """shared""",
"""encoder_norm""": """final_layer_norm""",
"""decoder_norm""": """final_layer_norm""",
"""relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""",
"""router/router_weights/w/""": """router/classifier/""",
"""roer/roer_weights/w/""": """router/classifier/""",
"""logits_dense""": """lm_head""",
}
def A__ ( lowerCamelCase ) -> Optional[int]:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
UpperCamelCase_: Union[str, Any] = list(s_dict.keys() )
for key in keys:
UpperCamelCase_: int = r""".*/layers_(\d+)"""
UpperCamelCase_: Any = key
if re.match(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Tuple = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , lowerCamelCase )
UpperCamelCase_: Dict = r"""(encoder|decoder)\/"""
if re.match(lowerCamelCase , lowerCamelCase ):
UpperCamelCase_: Any = re.match(lowerCamelCase , lowerCamelCase ).groups()
if groups[0] == "encoder":
UpperCamelCase_: Union[str, Any] = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , lowerCamelCase )
UpperCamelCase_: Dict = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , lowerCamelCase )
elif groups[0] == "decoder":
UpperCamelCase_: Optional[int] = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , lowerCamelCase )
UpperCamelCase_: Optional[Any] = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , lowerCamelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
UpperCamelCase_: Any = new_key.replace(lowerCamelCase , lowerCamelCase )
print(F'''{key} -> {new_key}''' )
UpperCamelCase_: List[Any] = s_dict.pop(lowerCamelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCamelCase_: Optional[int] = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
UpperCamelCase_: Any = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
UpperCamelCase_: Optional[int] = s_dict[key].shape[0]
UpperCamelCase_: Optional[int] = s_dict[key]
for idx in range(lowerCamelCase ):
                # split the stacked expert weights into one entry per expert
                # (reconstructed: the original nested f-string was lost in extraction)
                s_dict[key.replace("""expert/""" , F'''experts/expert_{idx}/''' )] = expert_weihts[idx]
                print(F'''{key} -> {key.replace("expert/" , F"experts/expert_{idx}/" )}''' )
            s_dict.pop(lowerCamelCase )
return s_dict
lowerCamelCase_ : Union[str, Any] = {
"""NUM_ENCODER_LAYERS""": """num_layers""",
"""NUM_DECODER_LAYERS""": """num_decoder_layers""",
"""NUM_HEADS""": """num_heads""",
"""HEAD_DIM""": """d_kv""",
"""EMBED_DIM""": """d_model""",
"""MLP_DIM""": """d_ff""",
"""NUM_SELECTED_EXPERTS""": """num_selected_experts""",
"""NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""",
"""NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""",
"""dense.MlpBlock.activations""": """feed_forward_proj""",
}
def A__ ( lowerCamelCase , lowerCamelCase ) -> Tuple:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(lowerCamelCase , """r""" ) as f:
UpperCamelCase_: Union[str, Any] = f.read()
UpperCamelCase_: Optional[Any] = re.findall(r"""(.*) = ([0-9.]*)""" , lowerCamelCase )
UpperCamelCase_: Dict = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
UpperCamelCase_: Dict = float(lowerCamelCase ) if """.""" in value else int(lowerCamelCase )
UpperCamelCase_: Any = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , lowerCamelCase )[0]
UpperCamelCase_: Tuple = str(activation[1] )
UpperCamelCase_: Any = num_experts
UpperCamelCase_: List[str] = SwitchTransformersConfig(**lowerCamelCase )
return config
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase="./" , lowerCamelCase=8 ) -> List[str]:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
UpperCamelCase_: Optional[int] = checkpoints.load_tax_checkpoint(lowerCamelCase )
if gin_file is not None:
UpperCamelCase_: List[Any] = convert_gin_to_config(lowerCamelCase , lowerCamelCase )
else:
UpperCamelCase_: Optional[int] = SwitchTransformersConfig.from_pretrained(lowerCamelCase )
UpperCamelCase_: List[str] = SwitchTransformersForConditionalGeneration(lowerCamelCase )
UpperCamelCase_: Union[str, Any] = flax_params["""target"""]
UpperCamelCase_: int = flatten_dict(lowerCamelCase , sep="""/""" )
UpperCamelCase_: Tuple = rename_keys(lowerCamelCase )
UpperCamelCase_: Optional[Any] = unflatten_dict(lowerCamelCase , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowerCamelCase , lowerCamelCase )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default=None,
type=str,
required=True,
        help="""Path to the original T5X (flax) SwitchTransformers checkpoint to convert.""",
)
parser.add_argument(
"""--gin_file""",
default=None,
type=str,
required=False,
help="""Path to the gin config file. If not provided, a `config_file` has to be passed """,
)
parser.add_argument(
"""--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model."""
)
parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""")
lowerCamelCase_ : int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
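# Standalone sketch of the layer-index renaming above (added; the sample key is
# illustrative, not from a real checkpoint): "encoder/layers_3/..." becomes
# "encoder/block/3/layer/...".
_sample_key = "encoder/layers_3/attention/key/kernel"
assert re.sub(r"layers_(\d+)", r"block/\1/layer", _sample_key) == "encoder/block/3/layer/attention/key/kernel"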
| 670 |
from manim import *
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_: Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_: Tuple = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Tuple = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Union[str, Any] = VGroup(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[Any] = Text("""CPU""" , font_size=24 )
UpperCamelCase_: int = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(snake_case_ )
UpperCamelCase_: Optional[int] = [mem.copy() for i in range(1 )]
UpperCamelCase_: Dict = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Optional[int] = Text("""GPU""" , font_size=24 )
UpperCamelCase_: Optional[int] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
gpu.align_to(snake_case_ , snake_case_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(snake_case_ )
UpperCamelCase_: Dict = [mem.copy() for i in range(6 )]
UpperCamelCase_: List[str] = VGroup(*snake_case_ ).arrange(snake_case_ , buff=0 )
UpperCamelCase_: Any = Text("""Model""" , font_size=24 )
UpperCamelCase_: Optional[Any] = Group(snake_case_ , snake_case_ ).arrange(snake_case_ , buff=0.5 , aligned_edge=snake_case_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , Create(snake_case_ , run_time=1 ) , )
UpperCamelCase_: List[Any] = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
UpperCamelCase_: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_: Union[str, Any] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(snake_case_ , run_time=2.5 ) , Write(snake_case_ ) , Write(snake_case_ ) )
self.add(snake_case_ )
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Union[str, Any] = []
UpperCamelCase_: Tuple = []
for i, rect in enumerate(snake_case_ ):
UpperCamelCase_: Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case_ , opacity=0.7 )
cpu_target.move_to(snake_case_ )
cpu_target.generate_target()
UpperCamelCase_: int = 0.46 / 4
UpperCamelCase_: Optional[int] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case_ , buff=0.0 )
cpu_targs.append(snake_case_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case_ ) )
second_animations.append(MoveToTarget(snake_case_ , run_time=1.5 ) )
self.play(*snake_case_ )
self.play(*snake_case_ )
self.wait()
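# Rendering note (added): manim scenes are rendered from the CLI rather than by
# running the file directly. Assuming this file is saved as stage.py, a quick
# low-quality preview of the scene class above would be:
#
#     manim -pql stage.py _UpperCamelCase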
| 670 | 1 |
import argparse
import os
import re
lowerCamelCase_ : int = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCamelCase_ : str = re.compile(r"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
lowerCamelCase_ : Optional[Any] = re.compile(r"""\s*\(\s*\"(\S[^\"]+)\"""")
def A__ ( lowerCamelCase , lowerCamelCase = False ) -> List[str]:
with open(lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase_: int = f.read()
UpperCamelCase_: Union[str, Any] = content.split("""\n""" )
UpperCamelCase_: Tuple = []
UpperCamelCase_: List[str] = 0
while line_idx < len(lowerCamelCase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
UpperCamelCase_: Union[str, Any] = len(re.search(r"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(""" """ * indent + """(""" ):
new_lines.append(lines[line_idx] )
line_idx += 1
UpperCamelCase_: Tuple = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
UpperCamelCase_: Optional[Any] = line_idx
while not lines[line_idx].startswith(""" """ * indent + """)""" ):
line_idx += 1
blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
UpperCamelCase_: Tuple = sorted(lowerCamelCase , key=lambda lowerCamelCase : _re_identifier.search(lowerCamelCase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write("""\n""".join(lowerCamelCase ) )
elif "\n".join(lowerCamelCase ) != content:
return True
def A__ ( lowerCamelCase = False ) -> Union[str, Any]:
UpperCamelCase_: Optional[int] = [os.path.join(lowerCamelCase , lowerCamelCase ) for f in os.listdir(lowerCamelCase ) if f.endswith(""".py""" )]
UpperCamelCase_: Dict = [sort_auto_mapping(lowerCamelCase , overwrite=lowerCamelCase ) for fname in fnames]
if not overwrite and any(lowerCamelCase ):
UpperCamelCase_: Any = [f for f, d in zip(lowerCamelCase , lowerCamelCase ) if d]
raise ValueError(
F'''The following files have auto mappings that need sorting: {", ".join(lowerCamelCase )}. Run `make style` to fix'''
""" this.""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCamelCase_ : str = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
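# Tiny self-contained illustration of the sort key used above (added; the sample
# mapping blocks are made up): each block is ordered by the first quoted identifier
# it introduces.
_demo_re = re.compile(r"\s*\(\s*\"(\S[^\"]+)\"")
_demo_blocks = ['        ("roberta", "RobertaModel"),', '        ("bert", "BertModel"),']
_demo_sorted = sorted(_demo_blocks, key=lambda block: _demo_re.search(block).groups()[0])
assert [_demo_re.search(block).groups()[0] for block in _demo_sorted] == ["bert", "roberta"]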
| 670 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Union[str, Any] = """laion/clap-htsat-unfused"""
UpperCamelCase_: List[str] = tempfile.mkdtemp()
def lowerCAmelCase__ ( self : Tuple , **snake_case_ : Optional[Any] ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : str , **snake_case_ : Any ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Dict = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[str] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_feature_extractor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Optional[Any] = floats_list((3, 1000) )
UpperCamelCase_: List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: int = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Union[str, Any] ):
UpperCamelCase_: List[Any] = self.get_feature_extractor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: Dict = """This is a test string"""
UpperCamelCase_: Tuple = processor(text=snake_case_ )
UpperCamelCase_: Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: List[str] = self.get_feature_extractor()
UpperCamelCase_: Any = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
UpperCamelCase_: str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Tuple = processor.batch_decode(snake_case_ )
UpperCamelCase_: str = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Any = self.get_feature_extractor()
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
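# Hedged usage sketch (added; outside the test class): a ClapProcessor bundles the
# tokenizer and feature extractor behind one call. The checkpoint name matches the
# one exercised above; the silent one-second waveform is purely illustrative.
if __name__ == "__main__":
    import numpy as np
    processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    inputs = processor(text="a dog barking", audios=np.zeros(48_000, dtype=np.float32), return_tensors="np")
    print(sorted(inputs.keys()))  # e.g. attention_mask, input_features, input_ids, ...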
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ : int = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""LayoutLMv2FeatureExtractor"""]
lowerCamelCase_ : Any = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Tuple = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
import warnings
from ..trainer import Trainer
from ..utils import logging
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : Tuple=None , **snake_case_ : List[str] ):
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , snake_case_ , )
super().__init__(args=snake_case_ , **snake_case_ )
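# Migration sketch (added, illustrative): the shim above only warns and forwards to
# Trainer, so new code should construct Trainer directly with the same arguments, e.g.
#
#     from transformers import Trainer, TrainingArguments
#     trainer = Trainer(model=model, args=TrainingArguments(output_dir="out"))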
| 670 | 1 |
import re
import string
import numpy as np
import datasets
lowerCamelCase_ : Optional[int] = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
lowerCamelCase_ : int = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
def lowerCAmelCase__ ( self : List[str] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=None , snake_case_ : int=False , snake_case_ : Dict=False , snake_case_ : List[str]=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCamelCase_: int = np.array([re.sub(snake_case_ , """""" , snake_case_ ) for x in predictions] )
UpperCamelCase_: Tuple = np.array([re.sub(snake_case_ , """""" , snake_case_ ) for x in references] )
else:
UpperCamelCase_: int = np.asarray(snake_case_ )
UpperCamelCase_: Optional[Any] = np.asarray(snake_case_ )
if ignore_case:
UpperCamelCase_: List[str] = np.char.lower(snake_case_ )
UpperCamelCase_: Optional[Any] = np.char.lower(snake_case_ )
if ignore_punctuation:
UpperCamelCase_: Tuple = string.punctuation.maketrans("""""" , """""" , string.punctuation )
UpperCamelCase_: List[str] = np.char.translate(snake_case_ , table=snake_case_ )
UpperCamelCase_: List[str] = np.char.translate(snake_case_ , table=snake_case_ )
if ignore_numbers:
UpperCamelCase_: str = string.digits.maketrans("""""" , """""" , string.digits )
UpperCamelCase_: List[str] = np.char.translate(snake_case_ , table=snake_case_ )
UpperCamelCase_: int = np.char.translate(snake_case_ , table=snake_case_ )
UpperCamelCase_: Dict = predictions == references
return {"exact_match": np.mean(snake_case_ ) * 100}
| 670 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
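# Hedged usage sketch: invoking this converter from the shell (all paths and
# the script file name below are placeholders, not files shipped here):
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path /path/to/generator_checkpoint.pt \
#       --stats_path /path/to/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-user/speecht5-hifigan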
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ : Union[str, Any] = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""LayoutLMv3FeatureExtractor"""]
lowerCamelCase_ : List[Any] = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
lowerCamelCase_ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
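# Hedged sketch of the lazy-import pattern wired up above: `_LazyModule`
# replaces this module in sys.modules and imports a submodule only when one of
# its attributes is first accessed. The class below is an illustrative
# stand-in, not the real transformers implementation.
import importlib

class _LazyModuleSketch:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_module = {
            name: mod for mod, names in import_structure.items() for name in names
        }

    def __getattr__(self, name):
        mod = self._name_to_module.get(name)
        if mod is None:
            raise AttributeError(name)
        return getattr(importlib.import_module(f"{self._package}.{mod}"), name)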
| 670 |
lowerCamelCase_ : Optional[Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase_ : Union[str, Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase_ : Optional[Any] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 670 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ViTImageProcessor if is_vision_available() else None
@property
def lowerCAmelCase__ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = (3, 32, 128)
UpperCamelCase_: Optional[Any] = tempfile.mkdtemp()
# fmt: off
UpperCamelCase_: Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCamelCase_: Optional[Any] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
UpperCamelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
UpperCamelCase_: List[Any] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
UpperCamelCase_: Any = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , **snake_case_ : List[Any] ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Union[str, Any] , **snake_case_ : str ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: int = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
UpperCamelCase_: Any = Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) )
return image_input
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: int = self.get_tokenizer()
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Any = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: Optional[int] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: str = self.get_tokenizer()
UpperCamelCase_: List[str] = self.get_image_processor()
UpperCamelCase_: List[str] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase_: List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase_: Optional[Any] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
UpperCamelCase_: List[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self.get_image_processor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Dict = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: Optional[int] = image_processor(snake_case_ , return_tensors="""np""" )
UpperCamelCase_: Union[str, Any] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: str = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Dict = """test"""
UpperCamelCase_: Dict = processor(text=snake_case_ )
UpperCamelCase_: Any = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: Optional[Any] = self.get_image_processor()
UpperCamelCase_: List[str] = self.get_tokenizer()
UpperCamelCase_: int = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Tuple = """test"""
UpperCamelCase_: int = self.prepare_image_inputs()
UpperCamelCase_: str = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowerCAmelCase__ ( self : Tuple ):
UpperCamelCase_: Union[str, Any] = self.get_image_processor()
UpperCamelCase_: Union[str, Any] = self.get_tokenizer()
UpperCamelCase_: Any = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase_: Union[str, Any] = processor.char_decode(snake_case_ )
UpperCamelCase_: int = tokenizer.batch_decode(snake_case_ )
UpperCamelCase_: int = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Dict = self.get_tokenizer()
UpperCamelCase_: Dict = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Optional[Any] = None
UpperCamelCase_: Optional[int] = self.prepare_image_inputs()
UpperCamelCase_: List[str] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowerCAmelCase__ ( self : List[Any] ):
UpperCamelCase_: str = self.get_image_processor()
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
UpperCamelCase_: Any = torch.randn(1 , 27 , 38 )
UpperCamelCase_: int = torch.randn(1 , 27 , 5_0257 )
UpperCamelCase_: Optional[int] = torch.randn(1 , 27 , 3_0522 )
UpperCamelCase_: List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
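# Hedged note on the shapes exercised above: `batch_decode` takes one logits
# tensor per decoding head -- character (the 38-symbol vocab built in setUp),
# BPE (GPT-2-sized vocab, 50257) and WordPiece (BERT-sized vocab, 30522) --
# each shaped (batch, sequence_length, vocab_size), and fuses them into the
# single best string per sample reported under "generated_text".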
| 670 |
import cva
import numpy as np
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Dict , snake_case_ : float , snake_case_ : int ):
if k in (0.04, 0.06):
UpperCamelCase_: Union[str, Any] = k
UpperCamelCase_: Union[str, Any] = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : int ):
return str(self.k )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : str ):
UpperCamelCase_: int = cva.imread(snake_case_ , 0 )
UpperCamelCase_, UpperCamelCase_: List[Any] = img.shape
UpperCamelCase_: list[list[int]] = []
UpperCamelCase_: int = img.copy()
UpperCamelCase_: Any = cva.cvtColor(snake_case_ , cva.COLOR_GRAY2RGB )
UpperCamelCase_, UpperCamelCase_: List[Any] = np.gradient(snake_case_ )
UpperCamelCase_: Optional[Any] = dx**2
UpperCamelCase_: Dict = dy**2
UpperCamelCase_: Optional[Any] = dx * dy
UpperCamelCase_: str = self.k  # use the k validated in __init__ instead of a hard-coded 0.04
UpperCamelCase_: int = self.window_size // 2
for y in range(snake_case_ , h - offset ):
for x in range(snake_case_ , w - offset ):
UpperCamelCase_: List[Any] = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: int = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCamelCase_: List[str] = (wxx * wyy) - (wxy**2)
UpperCamelCase_: Optional[int] = wxx + wyy
UpperCamelCase_: Dict = det - k * (trace**2)
# threshold on the corner response r; raise or lower to tune sensitivity
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : Any = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
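# Hedged vectorized sketch of the response computed in the loop above: for the
# 2x2 structure tensor M = [[wxx, wxy], [wxy, wyy]] summed over the window,
# r = det(M) - k * trace(M)**2. The helper below is illustrative (not part of
# the class API); it replaces the double loop with an unnormalized box filter,
# reusing this file's imports (`cva` is the OpenCV module, `np` is numpy).
def harris_response_sketch(gray, k=0.04, win=3):
    dy, dx = np.gradient(gray.astype(float))
    # windowed sums of the gradient products
    wxx = cva.boxFilter(dx * dx, -1, (win, win), normalize=False)
    wyy = cva.boxFilter(dy * dy, -1, (win, win), normalize=False)
    wxy = cva.boxFilter(dx * dy, -1, (win, win), normalize=False)
    return (wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2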
| 670 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ : Union[str, Any] = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def A__ ( lowerCamelCase ) -> int:
if isinstance(lowerCamelCase , torch.Tensor ):
return image
elif isinstance(lowerCamelCase , PIL.Image.Image ):
UpperCamelCase_: Optional[int] = [image]
UpperCamelCase_: List[str] = [trans(img.convert("""RGB""" ) ) for img in image]
UpperCamelCase_: Dict = torch.stack(lowerCamelCase )
return image
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : Dict , snake_case_ : Dict , snake_case_ : Dict ):
super().__init__()
# make sure scheduler can always be converted to DDIM
UpperCamelCase_: Union[str, Any] = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=snake_case_ , scheduler=snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : List[str] ):
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : List[str] ):
# get the original timestep using init_timestep
UpperCamelCase_: Dict = min(int(num_inference_steps * strength ) , snake_case_ )
UpperCamelCase_: Optional[int] = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase_: Any = self.scheduler.timesteps[t_start:]
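# Worked example (illustrative): with num_inference_steps=50 and strength=0.8,
# init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10, so
# denoising runs over the last 40 of the 50 scheduled timesteps -- a higher
# strength keeps more noise and re-runs more of the trajectory.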
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any]=None ):
if not isinstance(snake_case_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case_ )}''' )
UpperCamelCase_: List[str] = image.to(device=snake_case_ , dtype=snake_case_ )
if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(snake_case_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCamelCase_: Optional[int] = init_latents.shape
UpperCamelCase_: Optional[int] = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
# get latents
print("""add noise to latents at timestep""" , snake_case_ )
UpperCamelCase_: Optional[int] = self.scheduler.add_noise(snake_case_ , snake_case_ , snake_case_ )
UpperCamelCase_: Tuple = init_latents
return latents
@torch.no_grad()
def __call__( self : str , snake_case_ : Union[torch.FloatTensor, PIL.Image.Image] = None , snake_case_ : float = 0.8 , snake_case_ : int = 1 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : float = 0.0 , snake_case_ : int = 50 , snake_case_ : Optional[bool] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
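# 1. Check inputs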
self.check_inputs(snake_case_ )
# 2. Preprocess image
UpperCamelCase_: Any = preprocess(snake_case_ )
# 3. set timesteps
self.scheduler.set_timesteps(snake_case_ , device=self.device )
UpperCamelCase_, UpperCamelCase_: List[str] = self.get_timesteps(snake_case_ , snake_case_ , self.device )
UpperCamelCase_: List[str] = timesteps[:1].repeat(snake_case_ )
# 4. Prepare latent variables
UpperCamelCase_: Tuple = self.prepare_latents(snake_case_ , snake_case_ , snake_case_ , self.unet.dtype , self.device , snake_case_ )
UpperCamelCase_: List[Any] = latents
# 5. Denoising loop
for t in self.progress_bar(snake_case_ ):
# 1. predict noise model_output
UpperCamelCase_: List[str] = self.unet(snake_case_ , snake_case_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in the DDIM paper and should be in [0, 1]
# do x_t -> x_t-1
UpperCamelCase_: Dict = self.scheduler.step(
snake_case_ , snake_case_ , snake_case_ , eta=snake_case_ , use_clipped_model_output=snake_case_ , generator=snake_case_ , ).prev_sample
UpperCamelCase_: Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase_: List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase_: List[str] = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=snake_case_ )
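# Hedged usage sketch (illustrative; assumes a trained UNet and a
# DDIM-compatible scheduler are already loaded):
#   pipe = _UpperCamelCase(unet, scheduler)
#   result = pipe(init_image, strength=0.8, num_inference_steps=50)
#   result.images[0].save("img2img.png")
# A higher `strength` discards more of `init_image` and denoises for longer.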
| 670 |
import random
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase = False ) -> dict:
UpperCamelCase_: dict = {i: [] for i in range(lowerCamelCase )}
# if the probability is greater than or equal to 1, generate a complete graph
if probability >= 1:
return complete_graph(lowerCamelCase )
# if the probability is less than or equal to 0, return a graph without edges
if probability <= 0:
return graph
# for each pair of nodes (i, j), add an edge from i to j
# if the randomly generated number is below the given probability
for i in range(lowerCamelCase ):
for j in range(i + 1 , lowerCamelCase ):
if random.random() < probability:
graph[i].append(lowerCamelCase )
if not directed:
# if the graph is undirected, also add the reverse edge from j to i
graph[j].append(lowerCamelCase )
return graph
def A__ ( lowerCamelCase ) -> dict:
return {
i: [j for j in range(lowerCamelCase ) if i != j] for i in range(lowerCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
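# Hedged usage sketch: seed the RNG for a reproducible undirected graph, e.g.
#   random.seed(0)
#   graph = A__(4, 0.5)  # the random-graph generator defined above
# which on CPython's default Mersenne Twister should yield
#   {0: [3], 1: [2], 2: [1, 3], 3: [0, 2]}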
| 670 | 1 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = logging.get_logger("""transformers.models.speecht5""")
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
hf_model.apply_weight_norm()
UpperCamelCase_: Union[str, Any] = checkpoint["""input_conv.weight_g"""]
UpperCamelCase_: Optional[int] = checkpoint["""input_conv.weight_v"""]
UpperCamelCase_: List[Any] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.weight_g''']
UpperCamelCase_: Dict = checkpoint[F'''upsamples.{i}.1.weight_v''']
UpperCamelCase_: List[str] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
UpperCamelCase_: Tuple = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
UpperCamelCase_: Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
UpperCamelCase_: Any = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
UpperCamelCase_: int = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
UpperCamelCase_: int = checkpoint["""output_conv.1.weight_g"""]
UpperCamelCase_: Tuple = checkpoint["""output_conv.1.weight_v"""]
UpperCamelCase_: List[str] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , ) -> Optional[int]:
if config_path is not None:
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase )
else:
UpperCamelCase_: str = SpeechTaHifiGanConfig()
UpperCamelCase_: Union[str, Any] = SpeechTaHifiGan(lowerCamelCase )
UpperCamelCase_: str = torch.load(lowerCamelCase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowerCamelCase , lowerCamelCase )
UpperCamelCase_: Union[str, Any] = np.load(lowerCamelCase )
UpperCamelCase_: int = stats[0].reshape(-1 )
UpperCamelCase_: Union[str, Any] = stats[1].reshape(-1 )
UpperCamelCase_: Dict = torch.from_numpy(lowerCamelCase ).float()
UpperCamelCase_: Optional[Any] = torch.from_numpy(lowerCamelCase ).float()
model.save_pretrained(lowerCamelCase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
lowerCamelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 670 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str ):
UpperCamelCase_: Optional[int] = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase_: Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Union[str, Any] = logging.get_verbosity()
UpperCamelCase_: int = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Union[str, Any] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case_ ) as cl:
logger.warning(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(snake_case_ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def lowerCAmelCase__ ( self : Optional[int] ):
# reset so the env var takes effect the next time a logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: str = os.getenv("""TRANSFORMERS_VERBOSITY""" , snake_case_ )
UpperCamelCase_: Any = logging.log_levels[env_level_str]
UpperCamelCase_: Dict = logging.get_verbosity()
self.assertEqual(
snake_case_ , snake_case_ , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
UpperCamelCase_: str = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def lowerCAmelCase__ ( self : List[Any] ):
# reset so the env var takes effect the next time a logger call is made
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: str = logging.logging.getLogger()
with CaptureLogger(snake_case_ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def lowerCAmelCase__ ( self : List[Any] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase_: List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
UpperCamelCase_: Any = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case_ ) as cl:
logger.warning_advice(snake_case_ )
self.assertEqual(cl.out , msg + """\n""" )
def A__ ( ) -> Union[str, Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
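# Hedged usage note: outside this test suite the same verbosity controls are
# usually driven either programmatically, e.g.
#   logging.set_verbosity_info()
# or via the environment when launching a script, e.g.
#   TRANSFORMERS_VERBOSITY=error python run_something.py
# (run_something.py is a placeholder, not a script shipped with transformers).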
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Tuple = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {
"""weiweishi/roc-bert-base-zh""": """https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json""",
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Tuple = """roc_bert"""
def __init__( self : Tuple , snake_case_ : Any=3_0522 , snake_case_ : int=768 , snake_case_ : List[Any]=12 , snake_case_ : Dict=12 , snake_case_ : Optional[int]=3072 , snake_case_ : Optional[Any]="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : Tuple=512 , snake_case_ : str=2 , snake_case_ : Union[str, Any]=0.02 , snake_case_ : Optional[int]=1e-12 , snake_case_ : List[str]=True , snake_case_ : Any=0 , snake_case_ : int="absolute" , snake_case_ : int=None , snake_case_ : Optional[int]=True , snake_case_ : Union[str, Any]=True , snake_case_ : List[str]=768 , snake_case_ : int=910 , snake_case_ : Dict=512 , snake_case_ : List[str]=2_4858 , snake_case_ : str=True , **snake_case_ : Any , ):
UpperCamelCase_: Optional[Any] = vocab_size
UpperCamelCase_: Union[str, Any] = max_position_embeddings
UpperCamelCase_: Dict = hidden_size
UpperCamelCase_: Union[str, Any] = num_hidden_layers
UpperCamelCase_: Optional[Any] = num_attention_heads
UpperCamelCase_: Optional[int] = intermediate_size
UpperCamelCase_: Dict = hidden_act
UpperCamelCase_: Optional[Any] = hidden_dropout_prob
UpperCamelCase_: Any = attention_probs_dropout_prob
UpperCamelCase_: Union[str, Any] = initializer_range
UpperCamelCase_: Optional[int] = type_vocab_size
UpperCamelCase_: Optional[Any] = layer_norm_eps
UpperCamelCase_: List[Any] = use_cache
UpperCamelCase_: Dict = enable_pronunciation
UpperCamelCase_: Dict = enable_shape
UpperCamelCase_: int = pronunciation_embed_dim
UpperCamelCase_: Dict = pronunciation_vocab_size
UpperCamelCase_: Union[str, Any] = shape_embed_dim
UpperCamelCase_: List[Any] = shape_vocab_size
UpperCamelCase_: int = concat_input
UpperCamelCase_: List[str] = position_embedding_type
UpperCamelCase_: Optional[Any] = classifier_dropout
super().__init__(pad_token_id=snake_case_ , **snake_case_ )
| 670 |
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
lowerCamelCase_ : Optional[int] = HUGGINGFACE_HUB_CACHE
lowerCamelCase_ : List[str] = """config.json"""
lowerCamelCase_ : Any = """diffusion_pytorch_model.bin"""
lowerCamelCase_ : Union[str, Any] = """diffusion_flax_model.msgpack"""
lowerCamelCase_ : Dict = """model.onnx"""
lowerCamelCase_ : List[Any] = """diffusion_pytorch_model.safetensors"""
lowerCamelCase_ : Optional[Any] = """weights.pb"""
lowerCamelCase_ : Optional[Any] = """https://huggingface.co"""
lowerCamelCase_ : Union[str, Any] = default_cache_path
lowerCamelCase_ : Tuple = """diffusers_modules"""
lowerCamelCase_ : Optional[Any] = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
lowerCamelCase_ : str = ["""fp16""", """non-ema"""]
lowerCamelCase_ : List[Any] = """.self_attn"""
| 670 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase_ : Dict = get_logger(__name__)
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[Any] = """dummy_data"""
__UpperCamelCase : List[Any] = """datasets"""
__UpperCamelCase : Optional[Any] = False
def __init__( self : Dict , snake_case_ : str , snake_case_ : str , snake_case_ : Union[Version, str] , snake_case_ : Optional[str] = None , snake_case_ : bool = False , snake_case_ : bool = True , snake_case_ : Optional[List[Callable]] = None , ):
UpperCamelCase_: Tuple = 0
UpperCamelCase_: int = dataset_name
UpperCamelCase_: Optional[int] = cache_dir
UpperCamelCase_: Optional[Any] = use_local_dummy_data
UpperCamelCase_: List[str] = config
# download_callbacks take a single url as input
UpperCamelCase_: List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
UpperCamelCase_: Optional[Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
UpperCamelCase_: Optional[Any] = str(snake_case_ )
# to be downloaded
UpperCamelCase_: Any = None
UpperCamelCase_: Tuple = None
@property
def lowerCAmelCase__ ( self : Tuple ):
if self._dummy_file is None:
UpperCamelCase_: int = self.download_dummy_data()
return self._dummy_file
@property
def lowerCAmelCase__ ( self : List[Any] ):
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("""dummy""" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("""dummy""" , self.version_name )
@property
def lowerCAmelCase__ ( self : str ):
return os.path.join(self.dummy_data_folder , """dummy_data.zip""" )
def lowerCAmelCase__ ( self : Any ):
UpperCamelCase_: str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
UpperCamelCase_: Optional[Any] = cached_path(
snake_case_ , cache_dir=self.cache_dir , extract_compressed_file=snake_case_ , force_extract=snake_case_ )
return os.path.join(snake_case_ , self.dummy_file_name )
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
if self._bucket_url is None:
UpperCamelCase_: Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""" ) )
return self._bucket_url
@property
def lowerCAmelCase__ ( self : int ):
# return the full path if it's a directory
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , """/""" ).split("""/""" )[:-1] )
def lowerCAmelCase__ ( self : Any , snake_case_ : Optional[int] , *snake_case_ : Tuple ):
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
UpperCamelCase_: List[str] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
UpperCamelCase_: Dict = self.dummy_file_name
# special case when data_url is a dict
if isinstance(snake_case_ , snake_case_ ):
return self.create_dummy_data_dict(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , (list, tuple) ):
return self.create_dummy_data_list(snake_case_ , snake_case_ )
else:
return self.create_dummy_data_single(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : int , *snake_case_ : List[str] ):
return self.download_and_extract(snake_case_ )
def lowerCAmelCase__ ( self : Dict , snake_case_ : List[Any] , snake_case_ : List[Any] ):
return self.download_and_extract(snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : Tuple , *snake_case_ : Dict , **snake_case_ : int ):
return path
def lowerCAmelCase__ ( self : Optional[Any] ):
return {}
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Any , snake_case_ : Optional[int] ):
UpperCamelCase_: List[Any] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(snake_case_ , snake_case_ ):
for single_url in single_urls:
download_callback(snake_case_ )
else:
UpperCamelCase_: List[str] = single_urls
download_callback(snake_case_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: Dict = [os.path.join(snake_case_ , urllib.parse.quote_plus(Path(snake_case_ ).name ) ) for x in single_urls]
else:
UpperCamelCase_: Tuple = single_urls
UpperCamelCase_: str = os.path.join(snake_case_ , urllib.parse.quote_plus(Path(snake_case_ ).name ) )
UpperCamelCase_: List[Any] = value
# make sure that values are unique
if all(isinstance(snake_case_ , snake_case_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
UpperCamelCase_: str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowerCAmelCase__ ( self : int , snake_case_ : List[Any] , snake_case_ : Dict ):
UpperCamelCase_: List[str] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
UpperCamelCase_: Dict = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , snake_case_ ) ) for url in data_url )
UpperCamelCase_: int = all(
url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
UpperCamelCase_: List[str] = [data_url[0]] * len(snake_case_ )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(snake_case_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCamelCase_: List[str] = os.path.join(snake_case_ , urllib.parse.quote_plus(single_url.split("""/""" )[-1] ) )
dummy_data_list.append(snake_case_ )
return dummy_data_list
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict , snake_case_ : List[Any] ):
for download_callback in self.download_callbacks:
download_callback(snake_case_ )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
UpperCamelCase_: Optional[Any] = os.path.join(snake_case_ , urllib.parse.quote_plus(data_url.split("""/""" )[-1] ) )
if os.path.exists(snake_case_ ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def lowerCAmelCase__ ( self : Optional[Any] ):
pass
def lowerCAmelCase__ ( self : Optional[int] ):
pass
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[Any] ):
def _iter_archive_members(snake_case_ : Optional[Any] ):
# this preserves the order of the members inside the ZIP archive
UpperCamelCase_: Union[str, Any] = Path(self.dummy_file ).parent
UpperCamelCase_: Union[str, Any] = path.relative_to(snake_case_ )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
UpperCamelCase_: Union[str, Any] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(snake_case_ )
UpperCamelCase_: Optional[int] = Path(snake_case_ )
UpperCamelCase_: int = _iter_archive_members(snake_case_ ) if self.use_local_dummy_data else path.rglob("""*""" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((""".""", """__""") ):
yield file_path.relative_to(snake_case_ ).as_posix(), file_path.open("""rb""" )
def lowerCAmelCase__ ( self : Any , snake_case_ : Union[str, Any] ):
if not isinstance(snake_case_ , snake_case_ ):
UpperCamelCase_: List[Any] = [paths]
for path in paths:
if os.path.isfile(snake_case_ ):
if os.path.basename(snake_case_ ).startswith((""".""", """__""") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(snake_case_ ):
if os.path.basename(snake_case_ ).startswith((""".""", """__""") ):
continue
dirnames.sort()
for filename in sorted(snake_case_ ):
if filename.startswith((""".""", """__""") ):
continue
yield os.path.join(snake_case_ , snake_case_ )
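# Hedged note on the layout implied by the path helpers above: local dummy
# data is expected at
#   <datasets_scripts_dir>/<dataset_name>/dummy[/<config_name>]/<version>/dummy_data.zip
# and the remote fallback resolves the same relative path through hf_github_url.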
| 670 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: List[Any] = inspect.getfile(accelerate.test_utils )
UpperCamelCase_: List[str] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
UpperCamelCase_: str = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase__ ( self : Optional[int] ):
UpperCamelCase_: Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
UpperCamelCase_: Dict = [sys.executable] + distributed_args
execute_subprocess_async(snake_case_ , env=os.environ.copy() )
| 670 | 1 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ["""image_processor"""]
__UpperCamelCase : Any = """SamImageProcessor"""
def __init__( self : Tuple , snake_case_ : Dict ):
super().__init__(snake_case_ )
UpperCamelCase_: int = self.image_processor
UpperCamelCase_: Optional[Any] = -10
UpperCamelCase_: Optional[Any] = self.image_processor.size["""longest_edge"""]
def __call__( self : Optional[int] , snake_case_ : Union[str, Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Any=None , snake_case_ : List[str]=None , snake_case_ : Optional[Union[str, TensorType]] = None , **snake_case_ : List[str] , ):
UpperCamelCase_: Any = self.image_processor(
snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
# pop arguments that are not used in the forward pass but are used nevertheless
UpperCamelCase_: Optional[Any] = encoding_image_processor["""original_sizes"""]
if hasattr(snake_case_ , """numpy""" ): # Checks if Torch or TF tensor
UpperCamelCase_: Optional[int] = original_sizes.numpy()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = self._check_and_preprocess_points(
input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , )
UpperCamelCase_: Any = self._normalize_and_convert(
snake_case_ , snake_case_ , input_points=snake_case_ , input_labels=snake_case_ , input_boxes=snake_case_ , return_tensors=snake_case_ , )
return encoding_image_processor
def lowerCAmelCase__ ( self : Dict , snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Optional[Any]=None , snake_case_ : Union[str, Any]=None , snake_case_ : Optional[Any]=None , snake_case_ : Dict="pt" , ):
if input_points is not None:
if len(snake_case_ ) != len(snake_case_ ):
UpperCamelCase_: int = [
self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0] ) for point in input_points
]
else:
UpperCamelCase_: Dict = [
self._normalize_coordinates(self.target_size , snake_case_ , snake_case_ )
for point, original_size in zip(snake_case_ , snake_case_ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCamelCase_, UpperCamelCase_: Union[str, Any] = self._pad_points_and_labels(snake_case_ , snake_case_ )
UpperCamelCase_: Optional[int] = np.array(snake_case_ )
if input_labels is not None:
UpperCamelCase_: Tuple = np.array(snake_case_ )
if input_boxes is not None:
if len(snake_case_ ) != len(snake_case_ ):
UpperCamelCase_: int = [
self._normalize_coordinates(self.target_size , snake_case_ , original_sizes[0] , is_bounding_box=snake_case_ )
for box in input_boxes
]
else:
UpperCamelCase_: Dict = [
self._normalize_coordinates(self.target_size , snake_case_ , snake_case_ , is_bounding_box=snake_case_ )
for box, original_size in zip(snake_case_ , snake_case_ )
]
UpperCamelCase_: Tuple = np.array(snake_case_ )
if input_boxes is not None:
if return_tensors == "pt":
UpperCamelCase_: List[Any] = torch.from_numpy(snake_case_ )
# boxes batch size of 1 by default
UpperCamelCase_: Union[str, Any] = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCamelCase_: Dict = tf.convert_to_tensor(snake_case_ )
# boxes batch size of 1 by default
UpperCamelCase_: str = tf.expand_dims(snake_case_ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCamelCase_: Optional[int] = torch.from_numpy(snake_case_ )
# point batch size of 1 by default
UpperCamelCase_: Union[str, Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCamelCase_: str = tf.convert_to_tensor(snake_case_ )
# point batch size of 1 by default
UpperCamelCase_: Optional[Any] = tf.expand_dims(snake_case_ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCamelCase_: Union[str, Any] = torch.from_numpy(snake_case_ )
# point batch size of 1 by default
UpperCamelCase_: str = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCamelCase_: List[Any] = tf.convert_to_tensor(snake_case_ )
# point batch size of 1 by default
UpperCamelCase_: Optional[Any] = tf.expand_dims(snake_case_ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def lowerCAmelCase__ ( self : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
UpperCamelCase_: Tuple = max([point.shape[0] for point in input_points] )
UpperCamelCase_: int = []
for i, point in enumerate(snake_case_ ):
if point.shape[0] != expected_nb_points:
UpperCamelCase_: Optional[int] = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCamelCase_: int = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(snake_case_ )
UpperCamelCase_: List[Any] = processed_input_points
return input_points, input_labels
def lowerCAmelCase__ ( self : int , snake_case_ : int , snake_case_ : np.ndarray , snake_case_ : List[Any] , snake_case_ : Optional[int]=False ):
UpperCamelCase_, UpperCamelCase_: Dict = original_size
UpperCamelCase_, UpperCamelCase_: str = self.image_processor._get_preprocess_shape(snake_case_ , longest_edge=snake_case_ )
UpperCamelCase_: Tuple = deepcopy(snake_case_ ).astype(snake_case_ )
if is_bounding_box:
UpperCamelCase_: Optional[Any] = coords.reshape(-1 , 2 , 2 )
UpperCamelCase_: List[str] = coords[..., 0] * (new_w / old_w)
UpperCamelCase_: int = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCamelCase_: List[Any] = coords.reshape(-1 , 4 )
return coords
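# Worked example (illustrative): with original_size = (600, 900) as (h, w) and
# target_size = 1024, _get_preprocess_shape maps the longest edge to 1024,
# giving (new_h, new_w) = (683, 1024); a point (x=450, y=300) then rescales to
# (450 * 1024 / 900, 300 * 683 / 600) ≈ (512.0, 341.5).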
def lowerCAmelCase__ ( self : int , snake_case_ : Union[str, Any]=None , snake_case_ : Tuple=None , snake_case_ : Union[str, Any]=None , ):
if input_points is not None:
if hasattr(snake_case_ , """numpy""" ): # Checks for TF or Torch tensor
UpperCamelCase_: str = input_points.numpy().tolist()
if not isinstance(snake_case_ , snake_case_ ) or not isinstance(input_points[0] , snake_case_ ):
raise ValueError("""Input points must be a list of list of floating points.""" )
UpperCamelCase_: Tuple = [np.array(snake_case_ ) for input_point in input_points]
else:
UpperCamelCase_: int = None
if input_labels is not None:
if hasattr(snake_case_ , """numpy""" ):
UpperCamelCase_: Any = input_labels.numpy().tolist()
if not isinstance(snake_case_ , snake_case_ ) or not isinstance(input_labels[0] , snake_case_ ):
raise ValueError("""Input labels must be a list of list integers.""" )
UpperCamelCase_: Optional[Any] = [np.array(snake_case_ ) for label in input_labels]
else:
UpperCamelCase_: Optional[Any] = None
if input_boxes is not None:
if hasattr(snake_case_ , """numpy""" ):
UpperCamelCase_: Union[str, Any] = input_boxes.numpy().tolist()
if (
not isinstance(snake_case_ , snake_case_ )
or not isinstance(input_boxes[0] , snake_case_ )
or not isinstance(input_boxes[0][0] , snake_case_ )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
UpperCamelCase_: List[Any] = [np.array(snake_case_ ).astype(np.floataa ) for box in input_boxes]
else:
UpperCamelCase_: Optional[Any] = None
return input_points, input_labels, input_boxes
@property
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(snake_case_ ) )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : Tuple , **snake_case_ : Tuple ):
return self.image_processor.post_process_masks(*snake_case_ , **snake_case_ )
| 670 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCamelCase ( _A , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase : Optional[int] = BarthezTokenizer
__UpperCamelCase : str = BarthezTokenizerFast
__UpperCamelCase : str = True
__UpperCamelCase : List[Any] = True
def lowerCAmelCase__ ( self : Optional[int] ):
super().setUp()
UpperCamelCase_: Tuple = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
UpperCamelCase_: Dict = tokenizer
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: str = """<pad>"""
UpperCamelCase_: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(snake_case_ ) , 10_1122 )
def lowerCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_1122 )
@require_torch
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCamelCase_: Union[str, Any] = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase_: Union[str, Any] = self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCamelCase_: Any = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : Any ):
if not self.test_rust_tokenizer:
return
UpperCamelCase_: Optional[Any] = self.get_tokenizer()
UpperCamelCase_: Union[str, Any] = self.get_rust_tokenizer()
UpperCamelCase_: str = """I was born in 92000, and this is falsé."""
UpperCamelCase_: str = tokenizer.tokenize(snake_case_ )
UpperCamelCase_: int = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: int = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
UpperCamelCase_: int = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
UpperCamelCase_: List[str] = self.get_rust_tokenizer()
UpperCamelCase_: Tuple = tokenizer.encode(snake_case_ )
UpperCamelCase_: Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
@slow
def lowerCAmelCase__ ( self : int ):
# fmt: off
UpperCamelCase_: Optional[Any] = {"""input_ids""": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model, so we also use French texts.
UpperCamelCase_: str = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=snake_case_ , )
| 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ : str = logging.get_logger(__name__)
lowerCamelCase_ : int = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class _UpperCamelCase ( _A , _A ):
'''simple docstring'''
__UpperCamelCase : Tuple = """dinat"""
__UpperCamelCase : Optional[int] = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[Any] , snake_case_ : Tuple=4 , snake_case_ : Optional[Any]=3 , snake_case_ : Tuple=64 , snake_case_ : List[Any]=[3, 4, 6, 5] , snake_case_ : int=[2, 4, 8, 16] , snake_case_ : str=7 , snake_case_ : Dict=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , snake_case_ : List[Any]=3.0 , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[int]=0.0 , snake_case_ : str=0.0 , snake_case_ : Optional[Any]=0.1 , snake_case_ : str="gelu" , snake_case_ : Optional[int]=0.02 , snake_case_ : List[str]=1e-5 , snake_case_ : str=0.0 , snake_case_ : Optional[Any]=None , snake_case_ : Any=None , **snake_case_ : Any , ):
super().__init__(**snake_case_ )
UpperCamelCase_: int = patch_size
UpperCamelCase_: Union[str, Any] = num_channels
UpperCamelCase_: Dict = embed_dim
UpperCamelCase_: List[str] = depths
UpperCamelCase_: int = len(snake_case_ )
UpperCamelCase_: Optional[Any] = num_heads
UpperCamelCase_: Optional[int] = kernel_size
UpperCamelCase_: Union[str, Any] = dilations
UpperCamelCase_: List[str] = mlp_ratio
UpperCamelCase_: List[str] = qkv_bias
UpperCamelCase_: Union[str, Any] = hidden_dropout_prob
UpperCamelCase_: Any = attention_probs_dropout_prob
UpperCamelCase_: Any = drop_path_rate
UpperCamelCase_: Union[str, Any] = hidden_act
UpperCamelCase_: Any = layer_norm_eps
UpperCamelCase_: List[str] = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase_: List[str] = int(embed_dim * 2 ** (len(snake_case_ ) - 1) )
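        # e.g. with the defaults above (embed_dim=64 and four stages) this works out
        # to 64 * 2 ** 3 = 512 (illustrative arithmetic, not from the source)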
UpperCamelCase_: Any = layer_scale_init_value
UpperCamelCase_: str = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(snake_case_ ) + 1 )]
UpperCamelCase_, UpperCamelCase_: List[str] = get_aligned_output_features_output_indices(
out_features=snake_case_ , out_indices=snake_case_ , stage_names=self.stage_names )
| 670 |
def A__ ( first , second ) -> int:
    """
    Implementation of addition with bitwise operators only.

    >>> A__(3, 5)
    8
    """
    while second != 0:
        # carry keeps the bit positions where both operands are 1
        carry = first & second
        # XOR adds the bits without propagating the carry
        first ^= second
        # shift the carry so it is added back in on the next iteration
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
    print(F"""{A__(first, second) = }""")
| 670 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self : str , *snake_case_ : Dict , snake_case_ : List[str]=None , snake_case_ : Tuple=None , **snake_case_ : Union[str, Any] ):
super().__init__(*snake_case_ , **snake_case_ )
UpperCamelCase_: Optional[Any] = eval_examples
UpperCamelCase_: Tuple = post_process_function
def lowerCAmelCase__ ( self : List[str] , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : str = "eval" ):
UpperCamelCase_: int = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase_: int = self.get_eval_dataloader(snake_case_ )
UpperCamelCase_: str = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase_: Optional[Any] = self.compute_metrics
UpperCamelCase_: Union[str, Any] = None
UpperCamelCase_: Dict = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase_: int = time.time()
try:
UpperCamelCase_: Optional[int] = eval_loop(
snake_case_ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case_ , metric_key_prefix=snake_case_ , )
finally:
UpperCamelCase_: Tuple = compute_metrics
UpperCamelCase_: Tuple = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case_ , snake_case_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
UpperCamelCase_: str = self.post_process_function(snake_case_ , snake_case_ , output.predictions )
UpperCamelCase_: List[str] = self.compute_metrics(snake_case_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCamelCase_: List[Any] = metrics.pop(snake_case_ )
metrics.update(output.metrics )
else:
UpperCamelCase_: int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase_: Optional[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case_ )
return metrics
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple=None , snake_case_ : str = "test" ):
UpperCamelCase_: List[str] = self.get_test_dataloader(snake_case_ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase_: List[str] = self.compute_metrics
UpperCamelCase_: str = None
UpperCamelCase_: Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
UpperCamelCase_: Any = time.time()
try:
UpperCamelCase_: int = eval_loop(
snake_case_ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case_ , metric_key_prefix=snake_case_ , )
finally:
UpperCamelCase_: Optional[int] = compute_metrics
UpperCamelCase_: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
snake_case_ , snake_case_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase_: Any = self.post_process_function(snake_case_ , snake_case_ , output.predictions , """predict""" )
UpperCamelCase_: Union[str, Any] = self.compute_metrics(snake_case_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
UpperCamelCase_: List[str] = metrics.pop(snake_case_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case_ )
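# Hedged usage sketch (not from the source): the subclass above is driven like the
# upstream question-answering Trainer, assuming the keyword parameters keep their
# upstream names (eval_examples, post_process_function):
#
#   trainer = _UpperCamelCase(
#       model=model,
#       args=training_args,
#       eval_examples=eval_examples,                 # raw, pre-tokenization examples
#       post_process_function=post_processing_fn,    # maps raw predictions to answers
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(metric_key_prefix="eval")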
| 670 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
lowerCamelCase_ : List[str] = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__UpperCamelCase : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : Optional[str] = field(default=_A , metadata={"""help""": """The input training data file (a text file)."""} )
__UpperCamelCase : Optional[str] = field(
default=_A , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__UpperCamelCase : bool = field(
default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__UpperCamelCase : bool = field(
default=_A , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__UpperCamelCase : Optional[int] = field(
default=_A , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowerCAmelCase__ ( self : Dict ):
if self.train_file is not None:
UpperCamelCase_: Union[str, Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCamelCase_: Dict = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCamelCase : PreTrainedTokenizerBase
__UpperCamelCase : Union[bool, str, PaddingStrategy] = True
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Optional[int] = None
def __call__( self : Optional[int] , snake_case_ : Dict ):
UpperCamelCase_: Dict = """label""" if """label""" in features[0].keys() else """labels"""
UpperCamelCase_: int = [feature.pop(snake_case_ ) for feature in features]
UpperCamelCase_: Optional[Any] = len(snake_case_ )
UpperCamelCase_: List[str] = len(features[0]["""input_ids"""] )
UpperCamelCase_: Tuple = [
[{k: v[i] for k, v in feature.items()} for i in range(snake_case_ )] for feature in features
]
UpperCamelCase_: Any = list(chain(*snake_case_ ) )
UpperCamelCase_: List[Any] = self.tokenizer.pad(
snake_case_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" , )
# Un-flatten
UpperCamelCase_: Tuple = {k: v.view(snake_case_ , snake_case_ , -1 ) for k, v in batch.items()}
# Add back labels
        UpperCamelCase_: Optional[int] = torch.tensor(snake_case_ , dtype=torch.int64 )
return batch
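# Hedged shape walk-through for the collator above: a batch of B features, each with
# num_choices=4 candidate encodings, is flattened to 4*B rows for tokenizer.pad and
# then reshaped back, so batch["input_ids"] has shape (B, 4, max_length) and the
# labels tensor has shape (B,).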
def A__ ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase_: str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , lowerCamelCase , lowerCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase_: Dict = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase )
datasets.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.set_verbosity(lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCamelCase_: List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase_: List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCamelCase_: List[str] = {}
if data_args.train_file is not None:
UpperCamelCase_: List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase_: Optional[int] = data_args.validation_file
UpperCamelCase_: Any = data_args.train_file.split(""".""" )[-1]
UpperCamelCase_: Tuple = load_dataset(
lowerCamelCase , data_files=lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCamelCase_: int = load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase_: Optional[int] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: Union[str, Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase_: List[str] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCamelCase_: Union[str, Any] = [F'''ending{i}''' for i in range(4 )]
UpperCamelCase_: str = """sent1"""
UpperCamelCase_: List[str] = """sent2"""
if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
            max_seq_length = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCamelCase ):
UpperCamelCase_: Optional[Any] = [[context] * 4 for context in examples[context_name]]
UpperCamelCase_: Dict = examples[question_header_name]
UpperCamelCase_: List[str] = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCamelCase )
]
# Flatten out
UpperCamelCase_: str = list(chain(*lowerCamelCase ) )
UpperCamelCase_: Any = list(chain(*lowerCamelCase ) )
# Tokenize
UpperCamelCase_: Any = tokenizer(
lowerCamelCase , lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
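    # Hedged shape note: for B incoming examples, the flattened sentence lists hold
    # 4*B strings; the final dict regroups every 4 consecutive tokenized rows back
    # into one example, i.e. a list of 4 candidate encodings per example.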
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
UpperCamelCase_: str = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
UpperCamelCase_: Union[str, Any] = min(len(lowerCamelCase ) , data_args.max_train_samples )
UpperCamelCase_: Optional[int] = train_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
UpperCamelCase_: str = train_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
UpperCamelCase_: Dict = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
UpperCamelCase_: str = min(len(lowerCamelCase ) , data_args.max_eval_samples )
UpperCamelCase_: Tuple = eval_dataset.select(range(lowerCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
UpperCamelCase_: str = eval_dataset.map(
lowerCamelCase , batched=lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCamelCase_: str = (
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=lowerCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(lowerCamelCase ):
UpperCamelCase_, UpperCamelCase_: List[str] = eval_predictions
UpperCamelCase_: Optional[Any] = np.argmax(lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCamelCase_: Union[str, Any] = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=lowerCamelCase , compute_metrics=lowerCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase_: List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase_: int = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase_: str = last_checkpoint
UpperCamelCase_: Optional[Any] = trainer.train(resume_from_checkpoint=lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase_: Tuple = train_result.metrics
UpperCamelCase_: Tuple = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase )
)
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""train""" , lowerCamelCase )
trainer.save_metrics("""train""" , lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_: Optional[Any] = trainer.evaluate()
UpperCamelCase_: Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase )
UpperCamelCase_: Optional[Any] = min(lowerCamelCase , len(lowerCamelCase ) )
trainer.log_metrics("""eval""" , lowerCamelCase )
trainer.save_metrics("""eval""" , lowerCamelCase )
UpperCamelCase_: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """multiple-choice""",
"""dataset_tags""": """swag""",
"""dataset_args""": """regular""",
"""dataset""": """SWAG""",
"""language""": """en""",
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase )
else:
trainer.create_model_card(**lowerCamelCase )
def A__ ( lowerCamelCase ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
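# Hedged invocation sketch (paths and hyper-parameters are illustrative; the flag
# names assume the argument dataclass fields keep their upstream names):
#
#   python run_swag.py \
#     --model_name_or_path bert-base-uncased \
#     --do_train --do_eval \
#     --output_dir /tmp/swag_output \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3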
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[Any] = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
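# Hedged usage note: with the module replaced by _LazyModule above, an import such as
#   from transformers.models.x_clip import XCLIPModel
# only loads the torch-backed modeling file on first attribute access, keeping the
# top-level package import cheap.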
| 670 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ : Union[str, Any] = logging.getLogger()
lowerCamelCase_ : List[str] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _UpperCamelCase ( _A ):
'''simple docstring'''
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : Dict ):
os.makedirs(snake_case_ , exist_ok=snake_case_ )
UpperCamelCase_: int = {"""source""": """What is love ?""", """target""": """life"""}
UpperCamelCase_: Tuple = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
UpperCamelCase_: Tuple = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(snake_case_ , f'''{split}.{field}''' ) , """w""" ) as f:
f.write(snake_case_ )
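    # The dummy corpus above writes parallel source/target files per split, e.g.
    # data/train.source and data/train.target with 12 lines each (2 for val/test).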
def lowerCAmelCase__ ( self : Dict , snake_case_ : int , snake_case_ : str = "pytorch" ):
UpperCamelCase_: Optional[Any] = self.get_auto_remove_tmp_dir()
UpperCamelCase_: Dict = os.path.join(snake_case_ , """output""" )
UpperCamelCase_: Any = os.path.join(snake_case_ , """data""" )
self._create_dummy_data(data_dir=snake_case_ )
UpperCamelCase_: Union[str, Any] = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
UpperCamelCase_: Optional[Any] = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(snake_case_ , env=self.get_env() )
UpperCamelCase_: Optional[int] = os.path.join(snake_case_ , """metrics.json""" )
with open(snake_case_ ) as f:
UpperCamelCase_: Any = json.load(snake_case_ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ):
UpperCamelCase_: Any = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : Dict ):
UpperCamelCase_: List[Any] = self._run_finetune(gpus=1 , distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] , 0.2 )
| 670 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : Tuple = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Union[str, Any] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 |
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , snake_case_ : int , snake_case_ : Optional[Any]=None , snake_case_ : List[str]=None ):
UpperCamelCase_: List[Any] = data
UpperCamelCase_: List[Any] = previous
UpperCamelCase_: Tuple = next_node
def __str__( self : Dict ):
return f'''{self.data}'''
def lowerCAmelCase__ ( self : List[str] ):
return self.data
def lowerCAmelCase__ ( self : Any ):
return self.next
def lowerCAmelCase__ ( self : List[str] ):
return self.previous
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = head
def __iter__( self : Union[str, Any] ):
return self
def lowerCAmelCase__ ( self : Union[str, Any] ):
if not self.current:
raise StopIteration
else:
UpperCamelCase_: Dict = self.current.get_data()
UpperCamelCase_: Tuple = self.current.get_next()
return value
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int ):
UpperCamelCase_: Optional[int] = None # First node in list
UpperCamelCase_: Dict = None # Last node in list
def __str__( self : Tuple ):
UpperCamelCase_: int = self.head
UpperCamelCase_: Tuple = []
while current is not None:
nodes.append(current.get_data() )
UpperCamelCase_: List[str] = current.get_next()
return " ".join(str(snake_case_ ) for node in nodes )
def __contains__( self : int , snake_case_ : int ):
UpperCamelCase_: Optional[Any] = self.head
while current:
if current.get_data() == value:
return True
UpperCamelCase_: Any = current.get_next()
return False
def __iter__( self : Any ):
return LinkedListIterator(self.head )
def lowerCAmelCase__ ( self : Tuple ):
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : Node ):
if self.head is None:
UpperCamelCase_: Tuple = node
UpperCamelCase_: Optional[int] = node
else:
self.insert_before_node(self.head , snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node ):
if self.head is None:
self.set_head(snake_case_ )
else:
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : int ):
UpperCamelCase_: Any = Node(snake_case_ )
if self.head is None:
self.set_head(snake_case_ )
else:
self.set_tail(snake_case_ )
def lowerCAmelCase__ ( self : Optional[Any] , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: str = node
UpperCamelCase_: int = node.previous
if node.get_previous() is None:
UpperCamelCase_: int = node_to_insert
else:
UpperCamelCase_: Dict = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Dict , snake_case_ : Node , snake_case_ : Node ):
UpperCamelCase_: Tuple = node
UpperCamelCase_: Dict = node.next
if node.get_next() is None:
UpperCamelCase_: Union[str, Any] = node_to_insert
else:
UpperCamelCase_: str = node_to_insert
UpperCamelCase_: int = node_to_insert
def lowerCAmelCase__ ( self : Tuple , snake_case_ : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = 1
UpperCamelCase_: List[str] = Node(snake_case_ )
UpperCamelCase_: Optional[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(snake_case_ , snake_case_ )
return
current_position += 1
UpperCamelCase_: Dict = node.next
self.insert_after_node(self.tail , snake_case_ )
def lowerCAmelCase__ ( self : int , snake_case_ : int ):
UpperCamelCase_: Union[str, Any] = self.head
while node:
if node.get_data() == item:
return node
UpperCamelCase_: List[Any] = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase__ ( self : List[Any] , snake_case_ : List[str] ):
if (node := self.get_node(snake_case_ )) is not None:
if node == self.head:
UpperCamelCase_: Optional[int] = self.head.get_next()
if node == self.tail:
UpperCamelCase_: Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(snake_case_ )
@staticmethod
def lowerCAmelCase__ ( snake_case_ : Node ):
if node.get_next():
UpperCamelCase_: str = node.previous
if node.get_previous():
UpperCamelCase_: int = node.next
UpperCamelCase_: List[str] = None
UpperCamelCase_: int = None
def lowerCAmelCase__ ( self : str ):
return self.head is None
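# Hedged behaviour sketch for the doubly linked list above (intent only, not
# executable as-is, since the obfuscated method names shadow one another):
#
#   dll = <the list class>()
#   insert 1, 2, 3                    # appended at the tail via set_tail
#   str(dll) == "1 2 3"; 2 in dll is True
#   deleting the node holding 2 gives str(dll) == "1 3"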
def A__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 670 | 1 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowerCamelCase_ : Tuple = """__DUMMY_TRANSFORMERS_USER__"""
lowerCamelCase_ : List[Any] = """Dummy User"""
lowerCamelCase_ : Optional[int] = """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"""
lowerCamelCase_ : int = """https://hub-ci.huggingface.co"""
lowerCamelCase_ : Optional[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
lowerCamelCase_ : Optional[Any] = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
lowerCamelCase_ : str = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def A__ ( lowerCamelCase ) -> int:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCamelCase )
@pytest.fixture
def A__ ( lowerCamelCase ) -> List[Any]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCamelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCamelCase )
@pytest.fixture
def A__ ( lowerCamelCase ) -> List[Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCamelCase )
@pytest.fixture
def A__ ( lowerCamelCase , lowerCamelCase ) -> Dict:
HfFolder.save_token(lowerCamelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def A__ ( ) -> Any:
return HfApi(endpoint=lowerCamelCase )
@pytest.fixture(scope="""session""" )
def A__ ( lowerCamelCase ) -> Any:
UpperCamelCase_: Optional[int] = HfFolder.get_token()
HfFolder.save_token(lowerCamelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(lowerCamelCase )
@pytest.fixture
def A__ ( lowerCamelCase ) -> Dict:
def _cleanup_repo(lowerCamelCase ):
hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def A__ ( lowerCamelCase ) -> int:
@contextmanager
def _temporary_repo(lowerCamelCase ):
try:
yield repo_id
finally:
cleanup_repo(lowerCamelCase )
return _temporary_repo
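# Hedged usage sketch (assuming the fixtures keep their upstream names, e.g.
# `temporary_repo`; the obfuscated `A__` definitions above would otherwise shadow
# one another):
#
#   def test_something(temporary_repo):
#       with temporary_repo(f"{CI_HUB_USER}/tmp-test-repo") as repo_id:
#           ...  # the repo is deleted on exit via the cleanup callback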
@pytest.fixture(scope="""session""" )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
UpperCamelCase_: str = F'''repo_txt_data-{int(time.time() * 1_0E3 )}'''
UpperCamelCase_: Dict = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" , private=lowerCamelCase )
hf_api.upload_file(
token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
UpperCamelCase_: Optional[Any] = F'''repo_zipped_txt_data-{int(time.time() * 1_0E3 )}'''
UpperCamelCase_: Tuple = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" , private=lowerCamelCase )
hf_api.upload_file(
token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[str]:
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
UpperCamelCase_: Any = F'''repo_zipped_img_data-{int(time.time() * 1_0E3 )}'''
UpperCamelCase_: List[Any] = F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" , private=lowerCamelCase )
hf_api.upload_file(
token=lowerCamelCase , path_or_fileobj=str(lowerCamelCase ) , path_in_repo="""data.zip""" , repo_id=lowerCamelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(lowerCamelCase , token=lowerCamelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Any:
return hf_private_dataset_repo_zipped_img_data_
| 670 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ : List[str] = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[Any] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 670 | 1 |