code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase ="hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.65_072, 0.58_492, 0.48_219, 0.55_521, 0.53_180, 0.55_939, 0.50_697, 0.39_800, 0.46_455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.65_863, 0.59_425, 0.49_326, 0.56_313, 0.53_875, 0.56_627, 0.51_065, 0.39_777, 0.46_330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.53_755, 0.60_786, 0.47_402, 0.49_488, 0.51_869, 0.49_819, 0.47_985, 0.38_957, 0.44_279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.53_817, 0.60_812, 0.47_384, 0.49_530, 0.51_894, 0.49_814, 0.47_984, 0.38_958, 0.44_271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = pipe(**snake_case__ ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
SCREAMING_SNAKE_CASE = np.array([0.53_895, 0.60_808, 0.47_933, 0.49_608, 0.51_886, 0.49_950, 0.48_053, 0.38_957, 0.44_200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='np' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids']
SCREAMING_SNAKE_CASE = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='np' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ort.SessionOptions()
SCREAMING_SNAKE_CASE = False
return options
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=1_0 , output_type='np' )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.0_452, 0.0_390, 0.0_087, 0.0_350, 0.0_617, 0.0_364, 0.0_544, 0.0_523, 0.0_720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'open neural network exchange'
SCREAMING_SNAKE_CASE = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case__ , output_type='np' )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.2_867, 0.1_974, 0.1_481, 0.7_294, 0.7_251, 0.6_667, 0.4_194, 0.5_642, 0.6_486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' , subfolder='scheduler' , revision='onnx' )
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'open neural network exchange'
SCREAMING_SNAKE_CASE = np.random.RandomState(0 )
SCREAMING_SNAKE_CASE = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=1_0 , generator=snake_case__ , output_type='np' )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
SCREAMING_SNAKE_CASE = np.array([0.2_306, 0.1_959, 0.1_593, 0.6_549, 0.6_394, 0.5_408, 0.5_065, 0.6_010, 0.6_161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 0
def test_callback_fn(snake_case__ : int , snake_case__ : int , snake_case__ : np.ndarray ) -> None:
SCREAMING_SNAKE_CASE = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 6_4, 6_4)
SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = np.array(
[-0.6_772, -0.3_835, -1.2_456, 0.1_905, -1.0_974, 0.6_967, -1.9_353, 0.0_178, 1.0_167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 6_4, 6_4)
SCREAMING_SNAKE_CASE = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE = np.array(
[-0.3_351, 0.2_241, -0.1_837, -0.2_325, -0.6_577, 0.3_393, -0.0_241, 0.5_899, 1.3_875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'Andromeda galaxy in a bottle'
SCREAMING_SNAKE_CASE = np.random.RandomState(0 )
pipe(
prompt=snake_case__ , num_inference_steps=5 , guidance_scale=7.5 , generator=snake_case__ , callback=snake_case__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , revision='onnx' , safety_checker=snake_case__ , feature_extractor=snake_case__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(snake_case__ , snake_case__ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = OnnxStableDiffusionPipeline.from_pretrained(snake_case__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =MODEL_FOR_CAUSAL_LM_MAPPING
__UpperCamelCase =TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
SCREAMING_SNAKE_CASE = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
snake_case__ , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=snake_case__ , num_return_sequences=2 , return_tensors=snake_case__ )
self.assertEqual(
snake_case__ , [
{'generated_token_ids': ANY(snake_case__ )},
{'generated_token_ids': ANY(snake_case__ )},
] , )
SCREAMING_SNAKE_CASE = text_generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE = '<pad>'
SCREAMING_SNAKE_CASE = text_generator(
['This is a test', 'This is a second test'] , do_sample=snake_case__ , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case__ , )
self.assertEqual(
snake_case__ , [
[
{'generated_token_ids': ANY(snake_case__ )},
{'generated_token_ids': ANY(snake_case__ )},
],
[
{'generated_token_ids': ANY(snake_case__ )},
{'generated_token_ids': ANY(snake_case__ )},
],
] , )
@require_tf
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
SCREAMING_SNAKE_CASE = text_generator('This is a test' , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
SCREAMING_SNAKE_CASE = text_generator(['This is a test', 'This is a second test'] , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TextGenerationPipeline(model=snake_case__ , tokenizer=snake_case__ )
return text_generator, ["This is a test", "Another test"]
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello I believe in'
SCREAMING_SNAKE_CASE = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
SCREAMING_SNAKE_CASE = text_generator(snake_case__ )
self.assertEqual(
snake_case__ , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
SCREAMING_SNAKE_CASE = text_generator(snake_case__ , stop_sequence=' fe' )
self.assertEqual(snake_case__ , [{'generated_text': 'Hello I believe in fe'}] )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = text_generator.model
SCREAMING_SNAKE_CASE = text_generator.tokenizer
SCREAMING_SNAKE_CASE = text_generator('This is a test' )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
SCREAMING_SNAKE_CASE = text_generator('This is a test' , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
SCREAMING_SNAKE_CASE = pipeline(task='text-generation' , model=snake_case__ , tokenizer=snake_case__ , return_full_text=snake_case__ )
SCREAMING_SNAKE_CASE = text_generator('This is a test' )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
SCREAMING_SNAKE_CASE = text_generator('This is a test' , return_full_text=snake_case__ )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
SCREAMING_SNAKE_CASE = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
SCREAMING_SNAKE_CASE = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case__ )
self.assertEqual(
snake_case__ , [
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
[{'generated_text': ANY(snake_case__ )}, {'generated_text': ANY(snake_case__ )}],
] , )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = text_generator('test' , return_full_text=snake_case__ , return_text=snake_case__ )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = text_generator('test' , return_full_text=snake_case__ , return_tensors=snake_case__ )
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = text_generator('test' , return_text=snake_case__ , return_tensors=snake_case__ )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
SCREAMING_SNAKE_CASE = text_generator('' )
self.assertEqual(snake_case__ , [{'generated_text': ANY(snake_case__ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
SCREAMING_SNAKE_CASE = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
SCREAMING_SNAKE_CASE = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0_0_0_0
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 5_0_0 , max_new_tokens=2_0 )
SCREAMING_SNAKE_CASE = text_generator('This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=2_0 )
# Hole strategy cannot work
with self.assertRaises(snake_case__ ):
text_generator(
'This is a test' * 5_0_0 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 1_0 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
import torch
# Classic `model_kwargs`
SCREAMING_SNAKE_CASE = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE = pipe('This is a test' )
self.assertEqual(
snake_case__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
SCREAMING_SNAKE_CASE = pipe('This is a test' )
self.assertEqual(
snake_case__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
SCREAMING_SNAKE_CASE = pipe('This is a test' )
self.assertEqual(
snake_case__ , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.floataa )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.floataa )
pipe('This is a test' , do_sample=snake_case__ , top_p=0.5 )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'Hello world'
SCREAMING_SNAKE_CASE = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.generation.tf_utils' )
else:
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.generation.utils' )
SCREAMING_SNAKE_CASE = 'Both `max_new_tokens`' # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(snake_case__ ) as cl:
SCREAMING_SNAKE_CASE = text_generator(snake_case__ , max_length=1_0 , max_new_tokens=1 )
self.assertIn(snake_case__ , cl.out )
# The user only sets one -> no warning
with CaptureLogger(snake_case__ ) as cl:
SCREAMING_SNAKE_CASE = text_generator(snake_case__ , max_new_tokens=1 )
self.assertNotIn(snake_case__ , cl.out )
with CaptureLogger(snake_case__ ) as cl:
SCREAMING_SNAKE_CASE = text_generator(snake_case__ , max_length=1_0 )
self.assertNotIn(snake_case__ , cl.out )
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if number < 0:
raise ValueError('number must not be negative' )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : list[list[int | float]] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = len(matrix[0] )
SCREAMING_SNAKE_CASE = min(_UpperCamelCase , _UpperCamelCase )
for row in range(_UpperCamelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = matrix[col][row] / matrix[row][row]
for i in range(_UpperCamelCase , _UpperCamelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
SCREAMING_SNAKE_CASE = True
for i in range(row + 1 , _UpperCamelCase ):
if matrix[i][row] != 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = matrix[i], matrix[row]
SCREAMING_SNAKE_CASE = False
break
if reduce:
rank -= 1
for i in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
import math
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(_UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : int = 1_00_01 ) -> int:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 2
while len(_UpperCamelCase ) < nth:
if is_prime(_UpperCamelCase ):
primes.append(_UpperCamelCase )
num += 1
else:
num += 1
return primes[len(_UpperCamelCase ) - 1]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : str=1_3 , snake_case__ : Dict=7 , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : List[str]=True , snake_case__ : Tuple=True , snake_case__ : str=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Any=5 , snake_case__ : List[Any]=4 , snake_case__ : Tuple=3_7 , snake_case__ : List[Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : int=5_1_2 , snake_case__ : int=1_6 , snake_case__ : List[str]=2 , snake_case__ : str=0.02 , snake_case__ : List[str]=4 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_attention_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_choices
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = FlaxAlbertModelTester(self )
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('albert-base-v2' )
SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case__ )
@require_flax
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = FlaxAlbertModel.from_pretrained('albert-base-v2' )
SCREAMING_SNAKE_CASE = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )[0]
SCREAMING_SNAKE_CASE = (1, 1_1, 7_6_8)
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1E-4 ) )
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [0] * len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [1] * len(_UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCamelCase )
while queue:
SCREAMING_SNAKE_CASE = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
SCREAMING_SNAKE_CASE = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_UpperCamelCase )
print(max(_UpperCamelCase ) )
# Adjacency list of Graph
a_ : str = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 673 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 1 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a_ : Optional[int] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
a_ : List[str] = {"mobilebert-uncased": 512}
a_ : Dict = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =MobileBertTokenizer
def __init__( self : Any , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : Optional[int]=True , snake_case__ : List[str]="[UNK]" , snake_case__ : Optional[int]="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : str="[CLS]" , snake_case__ : Any="[MASK]" , snake_case__ : Any=True , snake_case__ : List[Any]=None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , snake_case__ ) != do_lower_case
or normalizer_state.get('strip_accents' , snake_case__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , snake_case__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(snake_case__ , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**snake_case__ )
SCREAMING_SNAKE_CASE = do_lower_case
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int = 1_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 1 |
import sys
a_ : Tuple = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def __lowerCAmelCase ( _UpperCamelCase : str ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 1
for digit in s:
product *= int(_UpperCamelCase )
return product
def __lowerCAmelCase ( _UpperCamelCase : str = N ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = -sys.maxsize - 1
SCREAMING_SNAKE_CASE = n[:13]
SCREAMING_SNAKE_CASE = 13
while cur_index < len(_UpperCamelCase ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
SCREAMING_SNAKE_CASE = substr[1:] + n[cur_index]
cur_index += 1
else:
SCREAMING_SNAKE_CASE = max(_UpperCamelCase , str_eval(_UpperCamelCase ) )
SCREAMING_SNAKE_CASE = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 673 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(_UpperCamelCase )
if n_element < 1:
SCREAMING_SNAKE_CASE = ValueError('a should be a positive number' )
raise my_error
SCREAMING_SNAKE_CASE = [1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (0, 0, 0)
SCREAMING_SNAKE_CASE = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
a_ : Dict = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
a_ : Any = hamming(int(n))
print("-----------------------------------------------------")
print(F"""The list with nth numbers is: {hamming_numbers}""")
print("-----------------------------------------------------")
| 673 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 1 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
raise RuntimeError('CUDA out of memory.' )
class UpperCamelCase ( nn.Module ):
def __init__( self : Any ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = nn.Linear(3 , 4 )
SCREAMING_SNAKE_CASE = nn.BatchNormad(4 )
SCREAMING_SNAKE_CASE = nn.Linear(4 , 5 )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) )
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(snake_case__ : int ):
nonlocal batch_sizes
batch_sizes.append(snake_case__ )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(snake_case__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(snake_case__ : Union[str, Any] , snake_case__ : Any ):
nonlocal batch_sizes
batch_sizes.append(snake_case__ )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = mock_training_loop_function('hello' )
self.assertListEqual(snake_case__ , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, 'hello'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(snake_case__ : Union[str, Any] ):
pass
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : int ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(snake_case__ : Union[str, Any] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('No executable batch size found, reached zero.' , cm.exception.args[0] )
def UpperCamelCase ( self : int ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(snake_case__ : int , snake_case__ : Tuple , snake_case__ : Tuple ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function(1_2_8 , 'hello' , 'world' )
self.assertIn('Batch size was passed into `f`' , cm.exception.args[0] )
self.assertIn('`f(arg1=\'hello\', arg2=\'world\')' , cm.exception.args[0] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(snake_case__ : Dict ):
raise ValueError('Oops, we had an error!' )
with self.assertRaises(snake_case__ ) as cm:
mock_training_loop_function()
self.assertIn('Oops, we had an error!' , cm.exception.args[0] )
@require_cuda
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.cuda.memory_allocated()
SCREAMING_SNAKE_CASE = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , snake_case__ )
SCREAMING_SNAKE_CASE = release_memory(snake_case__ )
self.assertEqual(torch.cuda.memory_allocated() , snake_case__ )
| 673 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 1 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@require_torch
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='fill-mask' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE = '1'
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
SCREAMING_SNAKE_CASE = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(snake_case__ )
BertModel.from_pretrained(snake_case__ )
BertTokenizer.from_pretrained(snake_case__ )
pipeline(task='fill-mask' , model=snake_case__ )
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
SCREAMING_SNAKE_CASE = self.get_env()
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
SCREAMING_SNAKE_CASE = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
SCREAMING_SNAKE_CASE = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE = self.get_env()
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE = '1'
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '\nfrom transformers import pipeline\n '
SCREAMING_SNAKE_CASE = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
SCREAMING_SNAKE_CASE = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
SCREAMING_SNAKE_CASE = self.get_env()
SCREAMING_SNAKE_CASE = '1'
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, mock, run] )]
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = '\nfrom transformers import AutoModel\n '
SCREAMING_SNAKE_CASE = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
SCREAMING_SNAKE_CASE = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
SCREAMING_SNAKE_CASE = self.get_env()
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
SCREAMING_SNAKE_CASE = '1'
SCREAMING_SNAKE_CASE = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 673 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
a_ : Optional[Any] = get_tests_dir("fixtures/dummy-config.json")
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 0
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto' ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('bert-base-uncased' )
self.assertIsInstance(snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AutoConfig.for_model('roberta' )
self.assertIsInstance(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'fake-roberta' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(os.path.join(snake_case__ , 'config.json' ) , 'w' ) as f:
f.write(json.dumps({} ) )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertEqual(type(snake_case__ ) , snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
try:
AutoConfig.register('custom' , snake_case__ )
# Wrong model type will raise an error
with self.assertRaises(snake_case__ ):
AutoConfig.register('model' , snake_case__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case__ ):
AutoConfig.register('bert' , snake_case__ )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('bert-base' )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ , revision='aaaaaa' )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , 'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.' , ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=snake_case__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ , trust_remote_code=snake_case__ )
self.assertEqual(reloaded_config.__class__.__name__ , 'NewModelConfig' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="new-model"
try:
AutoConfig.register('new-model' , snake_case__ )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfigLocal' )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model' , trust_remote_code=snake_case__ )
self.assertEqual(config.__class__.__name__ , 'NewModelConfig' )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 673 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
a_ : Dict = logging.get_logger(__name__)
a_ : Any = "The Nymphenburg Palace is a beautiful palace in Munich!"
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
SCREAMING_SNAKE_CASE = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
SCREAMING_SNAKE_CASE = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=_UpperCamelCase , output_all_encodings=_UpperCamelCase , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , _UpperCamelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
SCREAMING_SNAKE_CASE = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
SCREAMING_SNAKE_CASE = os.path.join(get_home_dir() , 'models' )
SCREAMING_SNAKE_CASE = _load_vocab(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , cls=_UpperCamelCase )
SCREAMING_SNAKE_CASE = nlp.model.BERTModel(
_UpperCamelCase , len(_UpperCamelCase ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=_UpperCamelCase , use_token_type_embed=_UpperCamelCase , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=_UpperCamelCase , use_decoder=_UpperCamelCase , )
original_bort.load_parameters(_UpperCamelCase , cast_dtype=_UpperCamelCase , ignore_extra=_UpperCamelCase )
SCREAMING_SNAKE_CASE = original_bort._collect_params_with_prefix()
# Build our config 🤗
SCREAMING_SNAKE_CASE = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(_UpperCamelCase ),
}
SCREAMING_SNAKE_CASE = BertConfig.from_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = BertForMaskedLM(_UpperCamelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(_UpperCamelCase : Union[str, Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(_UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = hf_param.shape
SCREAMING_SNAKE_CASE = to_torch(params[gluon_param] )
SCREAMING_SNAKE_CASE = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
SCREAMING_SNAKE_CASE = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
SCREAMING_SNAKE_CASE = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
SCREAMING_SNAKE_CASE = hf_bort_model.bert.encoder.layer[i]
# self attention
SCREAMING_SNAKE_CASE = layer.attention.self
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
SCREAMING_SNAKE_CASE = layer.attention.output
SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
SCREAMING_SNAKE_CASE = layer.intermediate
SCREAMING_SNAKE_CASE = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
SCREAMING_SNAKE_CASE = layer.output
SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
SCREAMING_SNAKE_CASE = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('roberta-base' )
SCREAMING_SNAKE_CASE = tokenizer.encode_plus(_UpperCamelCase )['input_ids']
# Get gluon output
SCREAMING_SNAKE_CASE = mx.nd.array([input_ids] )
SCREAMING_SNAKE_CASE = original_bort(inputs=_UpperCamelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = BertModel.from_pretrained(_UpperCamelCase )
hf_bort_model.eval()
SCREAMING_SNAKE_CASE = tokenizer.encode_plus(_UpperCamelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE = hf_bort_model(**_UpperCamelCase )[0]
SCREAMING_SNAKE_CASE = output_gluon[0].asnumpy()
SCREAMING_SNAKE_CASE = output_hf[0].detach().numpy()
SCREAMING_SNAKE_CASE = np.max(np.abs(hf_layer - gluon_layer ) ).item()
SCREAMING_SNAKE_CASE = np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , _UpperCamelCase )
if __name__ == "__main__":
a_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : Any = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 673 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Union[str, Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : int = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : str = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a_ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 1 |
import math
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
return math.sqrt(_UpperCamelCase ) * math.sqrt(_UpperCamelCase ) == num
def __lowerCAmelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = n
while left <= right:
SCREAMING_SNAKE_CASE = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
SCREAMING_SNAKE_CASE = mid - 1
else:
SCREAMING_SNAKE_CASE = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 673 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
SCREAMING_SNAKE_CASE = quote(_UpperCamelCase )
return hfh.hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' , revision=_UpperCamelCase )
| 673 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 1 |
import operator
def __lowerCAmelCase ( _UpperCamelCase : list , _UpperCamelCase : bool = False , _UpperCamelCase : list | None = None ) -> list:
'''simple docstring'''
SCREAMING_SNAKE_CASE = operator.lt if reverse else operator.gt
SCREAMING_SNAKE_CASE = solution or []
if not arr:
return solution
SCREAMING_SNAKE_CASE = [arr.pop(0 )]
for i, item in enumerate(_UpperCamelCase ):
if _operator(_UpperCamelCase , sublist[-1] ):
sublist.append(_UpperCamelCase )
arr.pop(_UpperCamelCase )
# merging sublist into solution list
if not solution:
solution.extend(_UpperCamelCase )
else:
while sublist:
SCREAMING_SNAKE_CASE = sublist.pop(0 )
for i, xx in enumerate(_UpperCamelCase ):
if not _operator(_UpperCamelCase , _UpperCamelCase ):
solution.insert(_UpperCamelCase , _UpperCamelCase )
break
else:
solution.append(_UpperCamelCase )
strand_sort(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 673 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
a_ : Any = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
a_ : List[Any] = BASE_URL + "/user"
# https://github.com/settings/tokens
a_ : Any = os.environ.get("USER_TOKEN", "")
def __lowerCAmelCase ( _UpperCamelCase : str ) -> dict[Any, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {
'Authorization': f"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 673 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : List[str] = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[int] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
a_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 673 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = DPTConfig()
if "large" in checkpoint_url:
SCREAMING_SNAKE_CASE = 10_24
SCREAMING_SNAKE_CASE = 40_96
SCREAMING_SNAKE_CASE = 24
SCREAMING_SNAKE_CASE = 16
SCREAMING_SNAKE_CASE = [5, 11, 17, 23]
SCREAMING_SNAKE_CASE = [2_56, 5_12, 10_24, 10_24]
SCREAMING_SNAKE_CASE = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 1_50
SCREAMING_SNAKE_CASE = 'huggingface/label-files'
SCREAMING_SNAKE_CASE = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) ) , 'r' ) )
SCREAMING_SNAKE_CASE = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = idalabel
SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
SCREAMING_SNAKE_CASE = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('patch_embed' , 'patch_embeddings' )
if "pos_embed" in name:
SCREAMING_SNAKE_CASE = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
SCREAMING_SNAKE_CASE = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
SCREAMING_SNAKE_CASE = name.replace('proj' , 'projection' )
if "blocks" in name:
SCREAMING_SNAKE_CASE = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name:
SCREAMING_SNAKE_CASE = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
SCREAMING_SNAKE_CASE = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
SCREAMING_SNAKE_CASE = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
SCREAMING_SNAKE_CASE = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
SCREAMING_SNAKE_CASE = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
SCREAMING_SNAKE_CASE = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
SCREAMING_SNAKE_CASE = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
SCREAMING_SNAKE_CASE = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
SCREAMING_SNAKE_CASE = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
SCREAMING_SNAKE_CASE = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
SCREAMING_SNAKE_CASE = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
SCREAMING_SNAKE_CASE = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
SCREAMING_SNAKE_CASE = name.replace('pretrained' , 'dpt' )
if "bn" in name:
SCREAMING_SNAKE_CASE = name.replace('bn' , 'batch_norm' )
if "head" in name:
SCREAMING_SNAKE_CASE = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
SCREAMING_SNAKE_CASE = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
SCREAMING_SNAKE_CASE = name.replace('auxlayer' , 'auxiliary_head.head' )
return name
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ) -> Any:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
SCREAMING_SNAKE_CASE = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE = in_proj_weight[: config.hidden_size, :]
SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :]
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_dpt_config(_UpperCamelCase )
# load original state_dict from URL
SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(_UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE = state_dict.pop(_UpperCamelCase )
SCREAMING_SNAKE_CASE = val
# read in qkv matrices
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
SCREAMING_SNAKE_CASE = DPTForSemanticSegmentation(_UpperCamelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# Check outputs on an image
SCREAMING_SNAKE_CASE = 4_80 if 'ade' in checkpoint_url else 3_84
SCREAMING_SNAKE_CASE = DPTImageProcessor(size=_UpperCamelCase )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(_UpperCamelCase , return_tensors='pt' )
# forward pass
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase ).logits if 'ade' in checkpoint_url else model(**_UpperCamelCase ).predicted_depth
# Assert logits
SCREAMING_SNAKE_CASE = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
SCREAMING_SNAKE_CASE = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(_UpperCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , _UpperCamelCase , atol=1e-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _UpperCamelCase )
)
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('Pushing model to hub...' )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCamelCase , )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
a_ : Any = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 673 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 1 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) + 1
SCREAMING_SNAKE_CASE = len(_UpperCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
SCREAMING_SNAKE_CASE = [[0 for i in range(_UpperCamelCase )] for j in range(_UpperCamelCase )]
# since string of zero length match pattern of zero length
SCREAMING_SNAKE_CASE = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = dp[0][j - 2] if pattern[j - 1] == '*' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , _UpperCamelCase ):
for j in range(1 , _UpperCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
SCREAMING_SNAKE_CASE = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
SCREAMING_SNAKE_CASE = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
SCREAMING_SNAKE_CASE = dp[i - 1][j]
else:
SCREAMING_SNAKE_CASE = 0
else:
SCREAMING_SNAKE_CASE = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a_ : Optional[int] = "aab"
a_ : List[Any] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"""{input_string} matches the given pattern {pattern}""")
else:
print(F"""{input_string} does not match with the given pattern {pattern}""")
| 673 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a_ : Any = None
a_ : List[str] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a_ : List[str] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
a_ : List[Any] = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
a_ : Dict = "▁"
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =BigBirdTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
__UpperCamelCase =[]
def __init__( self : Optional[Any] , snake_case__ : Dict=None , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : int="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Tuple="<pad>" , snake_case__ : Tuple="[SEP]" , snake_case__ : Union[str, Any]="[MASK]" , snake_case__ : Dict="[CLS]" , **snake_case__ : Dict , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
snake_case__ , tokenizer_file=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCamelCase ( self : Any , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file , snake_case__ )
return (out_vocab_file,)
| 673 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
a_ : List[str] = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class UpperCamelCase ( _lowerCamelCase ):
__UpperCamelCase ="sew"
def __init__( self : List[Any] , snake_case__ : Tuple=3_2 , snake_case__ : Any=7_6_8 , snake_case__ : int=1_2 , snake_case__ : Optional[int]=1_2 , snake_case__ : List[str]=3_0_7_2 , snake_case__ : Any=2 , snake_case__ : int="gelu" , snake_case__ : List[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : List[str]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : str=0.02 , snake_case__ : Optional[int]=1E-5 , snake_case__ : List[Any]="group" , snake_case__ : Any="gelu" , snake_case__ : Optional[Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , snake_case__ : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case__ : List[Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case__ : List[str]=False , snake_case__ : List[str]=1_2_8 , snake_case__ : List[str]=1_6 , snake_case__ : List[Any]=True , snake_case__ : List[str]=0.05 , snake_case__ : Dict=1_0 , snake_case__ : str=2 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : Tuple=1_0 , snake_case__ : Tuple=0 , snake_case__ : str="mean" , snake_case__ : Any=False , snake_case__ : Optional[Any]=False , snake_case__ : Optional[Any]=2_5_6 , snake_case__ : Any=0 , snake_case__ : Optional[Any]=1 , snake_case__ : Any=2 , **snake_case__ : Optional[int] , ):
"""simple docstring"""
super().__init__(**A__ , pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(A__ )
SCREAMING_SNAKE_CASE = list(A__ )
SCREAMING_SNAKE_CASE = list(A__ )
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim )
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = squeeze_factor
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 700 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 0 |
'''simple docstring'''
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = hex_num.strip()
if not hex_num:
raise ValueError('No value was passed to the function' )
SCREAMING_SNAKE_CASE = hex_num[0] == '-'
if is_negative:
SCREAMING_SNAKE_CASE = hex_num[1:]
try:
SCREAMING_SNAKE_CASE = int(__A , 16 )
except ValueError:
raise ValueError('Invalid value was passed to the function' )
SCREAMING_SNAKE_CASE = ''
while int_num > 0:
SCREAMING_SNAKE_CASE = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a_ : Optional[Any] = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None ) -> int:
'''simple docstring'''
require_version(deps[pkg] , _SCREAMING_SNAKE_CASE )
| 702 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCamelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =VideoToVideoSDPipeline
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
__UpperCamelCase =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
__UpperCamelCase =PipelineTesterMixin.required_optional_params - {"latents"}
__UpperCamelCase =False
# No `output_type`.
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=3_2 , attention_head_dim=4 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=A_ , set_alpha_to_one=A_ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(A_ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Any=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(A_ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=A_ ).manual_seed(A_ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**A_ )
SCREAMING_SNAKE_CASE = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(A_ )
SCREAMING_SNAKE_CASE = 'np'
SCREAMING_SNAKE_CASE = sd_pipe(**A_ ).frames
SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
SCREAMING_SNAKE_CASE = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
SCREAMING_SNAKE_CASE = torch.Generator(device='cpu' ).manual_seed(0 )
SCREAMING_SNAKE_CASE = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=A_ )
SCREAMING_SNAKE_CASE = video.to('cuda' )
SCREAMING_SNAKE_CASE = 'Spiderman is surfing'
SCREAMING_SNAKE_CASE = pipe(A_ , video=A_ , generator=A_ , num_inference_steps=3 , output_type='pt' ).frames
SCREAMING_SNAKE_CASE = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2 | 703 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
SCREAMING_SNAKE_CASE = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE = {
'do_resize': True,
'size': {'height': 1_8, 'width': 1_8},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , UpperCamelCase_ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
def UpperCamelCase ( self : Any , **snake_case__ : List[str] ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def UpperCamelCase ( self : Optional[Any] , **snake_case__ : Dict ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(UpperCamelCase_ , return_tensors='np' )
SCREAMING_SNAKE_CASE = processor(images=UpperCamelCase_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = processor(text=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = tokenizer(UpperCamelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase_ ):
processor()
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(UpperCamelCase_ )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCamelCase ( unittest.TestCase , lowercase__ ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_tool('text-to-speech' )
self.tool.setup()
def UpperCamelCase ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = self.tool('hey' )
SCREAMING_SNAKE_CASE = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = self.tool('hey' )
SCREAMING_SNAKE_CASE = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 0 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
a_ : Any = 4
a_ : int = 3
class UpperCamelCase ( __lowerCamelCase ):
pass
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
for shard in shards:
for i in range(lowerCamelCase_ ):
yield {"i": i, "shard": shard}
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(os.environ['RANK'] )
SCREAMING_SNAKE_CASE = int(os.environ['WORLD_SIZE'] )
SCREAMING_SNAKE_CASE = ArgumentParser()
parser.add_argument('--streaming' , type=lowerCamelCase_ )
parser.add_argument('--local_rank' , type=lowerCamelCase_ )
parser.add_argument('--num_workers' , type=lowerCamelCase_ , default=0 )
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = args.streaming
SCREAMING_SNAKE_CASE = args.num_workers
SCREAMING_SNAKE_CASE = {'shards': [f"""shard_{shard_idx}""" for shard_idx in range(lowerCamelCase_ )]}
SCREAMING_SNAKE_CASE = IterableDataset.from_generator(lowerCamelCase_ , gen_kwargs=lowerCamelCase_ )
if not streaming:
SCREAMING_SNAKE_CASE = Dataset.from_list(list(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE = split_dataset_by_node(lowerCamelCase_ , rank=lowerCamelCase_ , world_size=lowerCamelCase_ )
SCREAMING_SNAKE_CASE = torch.utils.data.DataLoader(lowerCamelCase_ , num_workers=lowerCamelCase_ )
SCREAMING_SNAKE_CASE = NUM_SHARDS * NUM_ITEMS_PER_SHARD
SCREAMING_SNAKE_CASE = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
SCREAMING_SNAKE_CASE = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 706 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a_ : Any = logging.get_logger(__name__)
class UpperCamelCase ( UpperCAmelCase_ ):
def __init__( self : Optional[Any] , *snake_case__ : Optional[int] , **snake_case__ : List[str] ):
"""simple docstring"""
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 673 | 0 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(snake_case__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(snake_case__ ):
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
for model_name in ["bert-base-cased", "bert-large-uncased"]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ : Optional[int] ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["roberta-base", "roberta-large"]:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = FlaxRobertaModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**snake_case__ : Optional[int] ):
return model(**snake_case__ )
eval(**snake_case__ ).block_until_ready()
def UpperCamelCase ( self : str ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , 'bert-base is not a local folder and is not a valid model identifier' ):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained(snake_case__ , revision='aaaaaa' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case__ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(snake_case__ , 'Use `from_pt=True` to load this model' ):
SCREAMING_SNAKE_CASE = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 0 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __lowerCAmelCase ( *_UpperCamelCase : int ) -> Dict:
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE = list(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def __lowerCAmelCase ( _UpperCamelCase : Exception ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [
'CUDA out of memory.', # CUDA OOM
'cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.', # CUDNN SNAFU
'DefaultCPUAllocator: can\'t allocate memory', # CPU OOM
]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __lowerCAmelCase ( _UpperCamelCase : callable = None , _UpperCamelCase : int = 1_28 ) -> Optional[int]:
'''simple docstring'''
if function is None:
return functools.partial(__lowerCAmelCase , starting_batch_size=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = starting_batch_size
def decorator(*_UpperCamelCase : Optional[Any] , **_UpperCamelCase : int ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE = list(inspect.signature(__lowerCAmelCase ).parameters.keys() )
# Guard against user error
if len(__lowerCAmelCase ) < (len(__lowerCAmelCase ) + 1):
SCREAMING_SNAKE_CASE = ', '.join([f"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
f"""Batch size was passed into `{function.__name__}` as the first argument when called."""
f"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError('No executable batch size found, reached zero.' )
try:
return function(__lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase )
except Exception as e:
if should_reduce_batch_size(__lowerCAmelCase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
| 709 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCamelCase ( pl.LightningModule ):
def __init__( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = model
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = nn.Linear(self.model.config.hidden_size , self.num_labels )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
pass
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = LongformerModel.from_pretrained(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = LightningModel(lowerCamelCase__ )
SCREAMING_SNAKE_CASE = torch.load(lowerCamelCase__ , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
SCREAMING_SNAKE_CASE = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase__ )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
a_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 710 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 0 |
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =["image_processor"]
__UpperCamelCase ="SamImageProcessor"
def __init__( self : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(__A )
SCREAMING_SNAKE_CASE = self.image_processor
SCREAMING_SNAKE_CASE = -1_0
SCREAMING_SNAKE_CASE = self.image_processor.size["longest_edge"]
def __call__( self : Any , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , snake_case__ : Dict=None , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processor(
__A , return_tensors=__A , **__A , )
# pop arguments that are not used in the foward but used nevertheless
SCREAMING_SNAKE_CASE = encoding_image_processor["original_sizes"]
if hasattr(__A , 'numpy' ): # Checks if Torch or TF tensor
SCREAMING_SNAKE_CASE = original_sizes.numpy()
SCREAMING_SNAKE_CASE = self._check_and_preprocess_points(
input_points=__A , input_labels=__A , input_boxes=__A , )
SCREAMING_SNAKE_CASE = self._normalize_and_convert(
__A , __A , input_points=__A , input_labels=__A , input_boxes=__A , return_tensors=__A , )
return encoding_image_processor
def UpperCamelCase ( self : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[str]=None , snake_case__ : int=None , snake_case__ : Any=None , snake_case__ : int="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(__A ) != len(__A ):
SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , __A , original_sizes[0] ) for point in input_points
]
else:
SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , __A , __A )
for point, original_size in zip(__A , __A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
SCREAMING_SNAKE_CASE = self._pad_points_and_labels(__A , __A )
SCREAMING_SNAKE_CASE = np.array(__A )
if input_labels is not None:
SCREAMING_SNAKE_CASE = np.array(__A )
if input_boxes is not None:
if len(__A ) != len(__A ):
SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , __A , original_sizes[0] , is_bounding_box=__A )
for box in input_boxes
]
else:
SCREAMING_SNAKE_CASE = [
self._normalize_coordinates(self.target_size , __A , __A , is_bounding_box=__A )
for box, original_size in zip(__A , __A )
]
SCREAMING_SNAKE_CASE = np.array(__A )
if input_boxes is not None:
if return_tensors == "pt":
SCREAMING_SNAKE_CASE = torch.from_numpy(__A )
# boxes batch size of 1 by default
SCREAMING_SNAKE_CASE = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__A )
# boxes batch size of 1 by default
SCREAMING_SNAKE_CASE = tf.expand_dims(__A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
SCREAMING_SNAKE_CASE = torch.from_numpy(__A )
# point batch size of 1 by default
SCREAMING_SNAKE_CASE = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__A )
# point batch size of 1 by default
SCREAMING_SNAKE_CASE = tf.expand_dims(__A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
SCREAMING_SNAKE_CASE = torch.from_numpy(__A )
# point batch size of 1 by default
SCREAMING_SNAKE_CASE = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(__A )
# point batch size of 1 by default
SCREAMING_SNAKE_CASE = tf.expand_dims(__A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = max([point.shape[0] for point in input_points] )
SCREAMING_SNAKE_CASE = []
for i, point in enumerate(__A ):
if point.shape[0] != expected_nb_points:
SCREAMING_SNAKE_CASE = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
SCREAMING_SNAKE_CASE = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__A )
SCREAMING_SNAKE_CASE = processed_input_points
return input_points, input_labels
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : np.ndarray , snake_case__ : Union[str, Any] , snake_case__ : Optional[int]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = original_size
SCREAMING_SNAKE_CASE = self.image_processor._get_preprocess_shape(__A , longest_edge=__A )
SCREAMING_SNAKE_CASE = deepcopy(__A ).astype(__A )
if is_bounding_box:
SCREAMING_SNAKE_CASE = coords.reshape(-1 , 2 , 2 )
SCREAMING_SNAKE_CASE = coords[..., 0] * (new_w / old_w)
SCREAMING_SNAKE_CASE = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
SCREAMING_SNAKE_CASE = coords.reshape(-1 , 4 )
return coords
def UpperCamelCase ( self : List[str] , snake_case__ : str=None , snake_case__ : Tuple=None , snake_case__ : List[Any]=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(__A , 'numpy' ): # Checks for TF or Torch tensor
SCREAMING_SNAKE_CASE = input_points.numpy().tolist()
if not isinstance(__A , __A ) or not isinstance(input_points[0] , __A ):
raise ValueError('Input points must be a list of list of floating points.' )
SCREAMING_SNAKE_CASE = [np.array(__A ) for input_point in input_points]
else:
SCREAMING_SNAKE_CASE = None
if input_labels is not None:
if hasattr(__A , 'numpy' ):
SCREAMING_SNAKE_CASE = input_labels.numpy().tolist()
if not isinstance(__A , __A ) or not isinstance(input_labels[0] , __A ):
raise ValueError('Input labels must be a list of list integers.' )
SCREAMING_SNAKE_CASE = [np.array(__A ) for label in input_labels]
else:
SCREAMING_SNAKE_CASE = None
if input_boxes is not None:
if hasattr(__A , 'numpy' ):
SCREAMING_SNAKE_CASE = input_boxes.numpy().tolist()
if (
not isinstance(__A , __A )
or not isinstance(input_boxes[0] , __A )
or not isinstance(input_boxes[0][0] , __A )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
SCREAMING_SNAKE_CASE = [np.array(__A ).astype(np.floataa ) for box in input_boxes]
else:
SCREAMING_SNAKE_CASE = None
return input_points, input_labels, input_boxes
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(__A ) )
def UpperCamelCase ( self : List[str] , *snake_case__ : int , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.image_processor.post_process_masks(*__A , **__A )
| 711 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
a_ : str = "\\n Text data.\n Second line of data."
a_ : List[Any] = "file"
@pytest.fixture(scope='session' )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / (FILE_PATH + """.zstd""")
SCREAMING_SNAKE_CASE = bytes(__lowerCAmelCase , 'utf-8' )
with zstd.open(__lowerCAmelCase , 'wb' ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , __lowerCAmelCase ) , 'w' ) as f:
f.write(__lowerCAmelCase )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_path}
SCREAMING_SNAKE_CASE = input_paths[compression_format]
SCREAMING_SNAKE_CASE = tmp_path / """cache"""
SCREAMING_SNAKE_CASE = DownloadConfig(cache_dir=__lowerCAmelCase , extract_compressed_file=__lowerCAmelCase )
SCREAMING_SNAKE_CASE = cached_path(__lowerCAmelCase , download_config=__lowerCAmelCase )
with open(__lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE = f.read()
with open(__lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = """custom_cache"""
SCREAMING_SNAKE_CASE = """custom_extracted_dir"""
SCREAMING_SNAKE_CASE = tmp_path / """custom_extracted_path"""
if default_extracted:
SCREAMING_SNAKE_CASE = ("""downloads""" if default_cache_dir else custom_cache_dir, """extracted""")
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , __lowerCAmelCase )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE = xz_file
SCREAMING_SNAKE_CASE = (
DownloadConfig(extract_compressed_file=__lowerCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCAmelCase )
)
SCREAMING_SNAKE_CASE = cached_path(__lowerCAmelCase , download_config=__lowerCAmelCase )
assert Path(__lowerCAmelCase ).parent.parts[-2:] == expected
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = str(Path(__lowerCAmelCase ).resolve() )
assert cached_path(__lowerCAmelCase ) == text_file
# relative path
SCREAMING_SNAKE_CASE = str(Path(__lowerCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__lowerCAmelCase ) == text_file
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(__lowerCAmelCase ):
cached_path(__lowerCAmelCase )
# relative path
SCREAMING_SNAKE_CASE = """./__missing_file__.txt"""
with pytest.raises(__lowerCAmelCase ):
cached_path(__lowerCAmelCase )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(__lowerCAmelCase ) as f:
SCREAMING_SNAKE_CASE = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCAmelCase )
def __lowerCAmelCase ( ) -> List[Any]:
'''simple docstring'''
with pytest.raises(__lowerCAmelCase ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCAmelCase )
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(__lowerCAmelCase ):
http_get('https://huggingface.co' , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCAmelCase )
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(__lowerCAmelCase ):
ftp_get('ftp://huggingface.co' , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , __lowerCAmelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp('data' ) / """file.html"""
with pytest.raises(__lowerCAmelCase ):
fsspec_get('s3://huggingface.co' , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
fsspec_head('s3://huggingface.co' )
| 712 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 673 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : List[str] , snake_case__ : float , snake_case__ : Callable , snake_case__ : int , snake_case__ : float = 1.0 , snake_case__ : str = None , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = initial_learning_rate
SCREAMING_SNAKE_CASE = warmup_steps
SCREAMING_SNAKE_CASE = power
SCREAMING_SNAKE_CASE = decay_schedule_fn
SCREAMING_SNAKE_CASE = name
def __call__( self : List[str] , snake_case__ : List[str] ):
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
SCREAMING_SNAKE_CASE = tf.cast(snake_case__ , tf.floataa )
SCREAMING_SNAKE_CASE = tf.cast(self.warmup_steps , tf.floataa )
SCREAMING_SNAKE_CASE = global_step_float / warmup_steps_float
SCREAMING_SNAKE_CASE = self.initial_learning_rate * tf.math.pow(snake_case__ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=snake_case__ , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple = 0.0 , _UpperCamelCase : Optional[Any] = 0.9 , _UpperCamelCase : List[str] = 0.9_99 , _UpperCamelCase : Optional[Any] = 1e-8 , _UpperCamelCase : List[str] = None , _UpperCamelCase : Optional[Any] = None , _UpperCamelCase : Any = 0.0 , _UpperCamelCase : int = 1.0 , _UpperCamelCase : int = None , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=__UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=__UpperCAmelCase , )
if num_warmup_steps:
SCREAMING_SNAKE_CASE = WarmUp(
initial_learning_rate=__UpperCAmelCase , decay_schedule_fn=__UpperCAmelCase , warmup_steps=__UpperCAmelCase , )
if weight_decay_rate > 0.0:
SCREAMING_SNAKE_CASE = AdamWeightDecay(
learning_rate=__UpperCAmelCase , weight_decay_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=__UpperCAmelCase , )
else:
SCREAMING_SNAKE_CASE = tf.keras.optimizers.Adam(
learning_rate=__UpperCAmelCase , beta_a=__UpperCAmelCase , beta_a=__UpperCAmelCase , epsilon=__UpperCAmelCase , clipnorm=__UpperCAmelCase , global_clipnorm=__UpperCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class UpperCamelCase ( _snake_case ):
def __init__( self : List[Any] , snake_case__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , snake_case__ : float = 0.9 , snake_case__ : float = 0.999 , snake_case__ : float = 1E-7 , snake_case__ : bool = False , snake_case__ : float = 0.0 , snake_case__ : Optional[List[str]] = None , snake_case__ : Optional[List[str]] = None , snake_case__ : str = "AdamWeightDecay" , **snake_case__ : str , ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
SCREAMING_SNAKE_CASE = weight_decay_rate
SCREAMING_SNAKE_CASE = include_in_weight_decay
SCREAMING_SNAKE_CASE = exclude_from_weight_decay
@classmethod
def UpperCamelCase ( cls : int , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {'WarmUp': WarmUp}
return super(snake_case__ , cls ).from_config(snake_case__ , custom_objects=snake_case__ )
def UpperCamelCase ( self : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Optional[Any] ):
"""simple docstring"""
super(snake_case__ , self )._prepare_local(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def UpperCamelCase ( self : List[Any] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Dict=None , **snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = list(zip(*snake_case__ ) )
return super(snake_case__ , self ).apply_gradients(zip(snake_case__ , snake_case__ ) , name=snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any ):
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
SCREAMING_SNAKE_CASE = apply_state or {}
SCREAMING_SNAKE_CASE = apply_state.get((var_device, var_dtype) )
if coefficients is None:
SCREAMING_SNAKE_CASE = self._fallback_apply_state(snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_lr(var.device , var.dtype.base_dtype , snake_case__ )
SCREAMING_SNAKE_CASE = self._decay_weights_op(snake_case__ , snake_case__ , snake_case__ )
with tf.control_dependencies([decay] ):
return super(snake_case__ , self )._resource_apply_dense(snake_case__ , snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self._get_lr(var.device , var.dtype.base_dtype , snake_case__ )
SCREAMING_SNAKE_CASE = self._decay_weights_op(snake_case__ , snake_case__ , snake_case__ )
with tf.control_dependencies([decay] ):
return super(snake_case__ , self )._resource_apply_sparse(snake_case__ , snake_case__ , snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(snake_case__ , snake_case__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(snake_case__ , snake_case__ ) is not None:
return False
return True
class UpperCamelCase ( _snake_case ):
def __init__( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = None
@property
def UpperCamelCase ( self : int ):
"""simple docstring"""
if self._accum_steps is None:
SCREAMING_SNAKE_CASE = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=snake_case__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : str , snake_case__ : Any ):
"""simple docstring"""
if not self._gradients:
SCREAMING_SNAKE_CASE = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(snake_case__ ) , trainable=snake_case__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(snake_case__ ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(snake_case__ )}""" )
for accum_gradient, gradient in zip(self._gradients , snake_case__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(snake_case__ )
self._accum_steps.assign_add(1 )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(snake_case__ ) )
| 713 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 0 |
from __future__ import annotations
import math
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
SCREAMING_SNAKE_CASE = math.log(len(lowerCAmelCase__ ) , 2 )
print('Optimal value : ' , end='' )
print(minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ):
__UpperCamelCase =RobertaTokenizer
__UpperCamelCase =RobertaTokenizerFast
__UpperCamelCase =True
__UpperCamelCase ={"cls_token": "<s>"}
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE = dict(zip(_UpperCamelCase , range(len(_UpperCamelCase ) ) ) )
SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCamelCase ) )
def UpperCamelCase ( self : int , **snake_case__ : Optional[Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase ( self : int , **snake_case__ : Tuple ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def UpperCamelCase ( self : Any , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = """lower newer"""
SCREAMING_SNAKE_CASE = """lower newer"""
return input_text, output_text
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE = """lower newer"""
SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
SCREAMING_SNAKE_CASE = tokenizer.tokenize(_UpperCamelCase ) # , add_prefix_space=True)
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=_UpperCamelCase ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained('roberta-base' )
SCREAMING_SNAKE_CASE = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = """Encode this sequence."""
SCREAMING_SNAKE_CASE = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase , add_prefix_space=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase )} ) # mask token has a left space
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
SCREAMING_SNAKE_CASE = """Encode <mask> sequence"""
SCREAMING_SNAKE_CASE = """Encode <mask>sequence"""
SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase )
SCREAMING_SNAKE_CASE = encoded.index(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.encode(_UpperCamelCase )
SCREAMING_SNAKE_CASE = encoded.index(_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = """A, <mask> AllenNLP sentence."""
SCREAMING_SNAKE_CASE = tokenizer_r.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_p.encode_plus(_UpperCamelCase , add_special_tokens=_UpperCamelCase , return_token_type_ids=_UpperCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_UpperCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['add_prefix_space'] , _UpperCamelCase )
self.assertEqual(post_processor_state['trim_offsets'] , _UpperCamelCase )
def UpperCamelCase ( self : str ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE = F"""{text_of_1_token} {text_of_1_token}"""
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ), len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , add_prefix_space=_UpperCamelCase , trim_offsets=_UpperCamelCase )
SCREAMING_SNAKE_CASE = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ), 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
| 715 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a_ : Tuple = get_logger()
a_ : List[str] = None
class UpperCamelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self : Tuple , snake_case__ : Union[str, Any]=None , snake_case__ : str=None , **snake_case__ : Dict ):
"""simple docstring"""
super().__init__(features=_A )
import jax
from jaxlib.xla_client import Device
if isinstance(_A , _A ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_A )}, as `jaxlib.xla_extension.Device` """
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
SCREAMING_SNAKE_CASE = device if isinstance(_A , _A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
SCREAMING_SNAKE_CASE = str(jax.devices()[0] )
SCREAMING_SNAKE_CASE = jnp_array_kwargs
@staticmethod
def UpperCamelCase ( ):
"""simple docstring"""
import jax
return {str(_A ): device for device in jax.devices()}
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_A , _A ) and column:
if all(
isinstance(_A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_A , axis=0 )
return column
def UpperCamelCase ( self : Tuple , snake_case__ : Optional[int] ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_A , (str, bytes, type(_A )) ):
return value
elif isinstance(_A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
SCREAMING_SNAKE_CASE = {}
if isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
SCREAMING_SNAKE_CASE = {'dtype': jnp.intaa}
else:
SCREAMING_SNAKE_CASE = {'dtype': jnp.intaa}
elif isinstance(_A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
SCREAMING_SNAKE_CASE = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_A , PIL.Image.Image ):
SCREAMING_SNAKE_CASE = np.asarray(_A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
SCREAMING_SNAKE_CASE = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_A , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_A , '__array__' ) and not isinstance(_A , jax.Array ):
SCREAMING_SNAKE_CASE = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
elif isinstance(_A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_A ) for substruct in data_struct] )
return self._tensorize(_A )
def UpperCamelCase ( self : Tuple , snake_case__ : Dict ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _A , map_list=_A )
def UpperCamelCase ( self : List[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_row(_A )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_row(_A )
return self.recursive_tensorize(_A )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_column(_A )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_column(_A , pa_table.column_names[0] )
SCREAMING_SNAKE_CASE = self.recursive_tensorize(_A )
SCREAMING_SNAKE_CASE = self._consolidate(_A )
return column
def UpperCamelCase ( self : Any , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.numpy_arrow_extractor().extract_batch(_A )
SCREAMING_SNAKE_CASE = self.python_features_decoder.decode_batch(_A )
SCREAMING_SNAKE_CASE = self.recursive_tensorize(_A )
for column_name in batch:
SCREAMING_SNAKE_CASE = self._consolidate(batch[column_name] )
return batch
| 716 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
a_ : Any = int(input("Enter number: ").strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 717 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 0 |
from __future__ import annotations
def __lowerCAmelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __lowerCAmelCase ( _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : Optional[Any]=10 , _UpperCamelCase : Tuple=1_00 , _UpperCamelCase : Optional[Any]=10_26 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Optional[int]="data/tokenized_stories_train_wikitext103.jbl" , _UpperCamelCase : Optional[Any]="igf_context_pairs.jbl" , ) -> List[str]:
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_datasets(
_A , _A , number=_A , min_len=10_26 , trim=_A )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
SCREAMING_SNAKE_CASE = load_gpta('gpt2' ).to(_A )
print('computing perplexity on objective set' )
SCREAMING_SNAKE_CASE = compute_perplexity(_A , _A , _A ).item()
print('perplexity on objective set:' , _A )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_A , _A , _A , _A , _A , _A , _A , _A )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : str=15 , _UpperCamelCase : List[str]=1_28 , _UpperCamelCase : int=1_00 , _UpperCamelCase : Dict="igf_model.pt" , ) -> Any:
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE = SecondaryLearner(_A )
# Train secondary learner
SCREAMING_SNAKE_CASE = train_secondary_learner(
_A , _A , max_epochs=_A , batch_size=_A , eval_freq=1_00 , igf_model_path=_A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple=32 , _UpperCamelCase : Optional[Any]=10_00 , _UpperCamelCase : List[str]=16 , _UpperCamelCase : Dict=1.0 , _UpperCamelCase : int=recopy_gpta , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Dict=10 , _UpperCamelCase : List[Any]="gpt2_finetuned.pt" , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE = RandomSampler(_A )
SCREAMING_SNAKE_CASE = DataLoader(_A , sampler=_A )
SCREAMING_SNAKE_CASE = max_steps // (len(_A )) + 1
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = torch.zeros((1, context_len) , dtype=torch.long , device=_A )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = recopy_model(_A , _A , _A )
model.train()
if secondary_learner is not None:
secondary_learner.to(_A )
secondary_learner.eval()
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print('Test perplexity, step' , _A , ':' , _A )
for epoch in range(int(_A ) ):
for step, example in enumerate(_A ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE = model(_A , labels=_A )
SCREAMING_SNAKE_CASE = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE = secondary_learner.forward(
torch.tensor(_A , dtype=torch.long , device=_A ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_A ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
SCREAMING_SNAKE_CASE = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print('Test perplexity, step' , _A , ':' , _A )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _A )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=_A , type=_A , required=_A , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=_A , default=_A , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=_A , default=_A , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=_A , type=_A , required=_A , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=_A , default=_A , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=_A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=1_00 , type=_A , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=1_00 , type=_A , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=10_00 , type=_A , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=1_28 , type=_A , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=_A , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
'--eval_interval' , default=10 , type=_A , help=(
'decay the selectivity of our secondary learner filter from'
'1 standard deviation above average to 1 below average after 10 batches'
) , )
parser.add_argument(
'--number' , default=1_00 , type=_A , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=10_26 , type=_A , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=_A , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=_A , type=_A , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=_A , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=_A , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=_A , type=_A , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=_A , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
SCREAMING_SNAKE_CASE = training_secondary_learner(
_A , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=_A )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_A , _A , _A , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=_A , secondary_learner=_A , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 719 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase :
__UpperCamelCase =None
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , snake_case_ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = os.path.join(snake_case_ , 'feat_extract.json' )
feat_extract_first.to_json_file(snake_case_ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(snake_case_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(snake_case_ )[0]
check_json_file_has_correct_format(snake_case_ )
SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(snake_case_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.feature_extraction_class()
self.assertIsNotNone(snake_case_ )
| 720 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 0 |
from __future__ import annotations
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
if b == 0:
return (1, 0)
(SCREAMING_SNAKE_CASE) = extended_euclid(__UpperCamelCase , a % b )
SCREAMING_SNAKE_CASE = a // b
return (y, x - k * y)
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
(SCREAMING_SNAKE_CASE) = extended_euclid(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE = na * na
SCREAMING_SNAKE_CASE = ra * x * na + ra * y * na
return (n % m + m) % m
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
(SCREAMING_SNAKE_CASE) = extended_euclid(__UpperCamelCase , __UpperCamelCase )
if b < 0:
SCREAMING_SNAKE_CASE = (b % n + n) % n
return b
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = invert_modulo(__UpperCamelCase , __UpperCamelCase ), invert_modulo(__UpperCamelCase , __UpperCamelCase )
SCREAMING_SNAKE_CASE = na * na
SCREAMING_SNAKE_CASE = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 721 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : set ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = len(snake_case__ ), len(grid[0] )
if (
min(snake_case__ , snake_case__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE = 0
count += depth_first_search(snake_case__ , row + 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , row - 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col + 1 , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col - 1 , snake_case__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 0 |
'''simple docstring'''
import re
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> list:
'''simple docstring'''
return [char.split() for char in re.split(R'[^ a-z A-Z 0-9 \s]' , str_ )]
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = split_input(str_ )
return "".join(
[''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ) -> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE = split_input(__SCREAMING_SNAKE_CASE )
if upper:
SCREAMING_SNAKE_CASE = ''.join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
SCREAMING_SNAKE_CASE = ''.join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> str:
'''simple docstring'''
return to_simple_case(__SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
try:
SCREAMING_SNAKE_CASE = to_simple_case(__SCREAMING_SNAKE_CASE )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Any ) -> str:
'''simple docstring'''
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '_' )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , '-' )
if __name__ == "__main__":
__import__("doctest").testmod()
| 701 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
SCREAMING_SNAKE_CASE = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
SCREAMING_SNAKE_CASE = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
SCREAMING_SNAKE_CASE = subset[i - 1][j]
if arr[i - 1] <= j:
SCREAMING_SNAKE_CASE = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 0 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class UpperCamelCase :
def __init__( self : Union[str, Any] , snake_case__ : int , snake_case__ : Union[str, Any]=1_3 , snake_case__ : int=7 , snake_case__ : Optional[int]=True , snake_case__ : str=True , snake_case__ : Dict=False , snake_case__ : Tuple=True , snake_case__ : List[str]=9_9 , snake_case__ : Tuple=6_4 , snake_case__ : int=5 , snake_case__ : Any=4 , snake_case__ : List[str]=6_4 , snake_case__ : int="gelu" , snake_case__ : Any=0.1 , snake_case__ : str=0.1 , snake_case__ : str=5_1_2 , snake_case__ : str=1_6 , snake_case__ : Tuple=2 , snake_case__ : Any=0.02 , snake_case__ : str=3 , snake_case__ : Optional[int]=4 , snake_case__ : Tuple=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : str ):
"""simple docstring"""
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : str , snake_case__ : Dict , snake_case__ : str , snake_case__ : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MPNetModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE = model(__A , __A )
SCREAMING_SNAKE_CASE = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MPNetForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : int , snake_case__ : str , snake_case__ : List[str] , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MPNetForSequenceClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Any , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = MPNetForMultipleChoice(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MPNetForTokenClassification(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(SCREAMING_SNAKE_CASE) = config_and_inputs
SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__UpperCamelCase =(
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =True
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MPNetModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__A )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__A )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__A )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__A )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__A )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MPNetModel.from_pretrained('microsoft/mpnet-base' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
SCREAMING_SNAKE_CASE = model(__A )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __A )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) ) | 703 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Dict = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 0 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str=True , _UpperCamelCase : Optional[Any]="pt" ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {"""add_prefix_space""": True} if isinstance(_UpperCamelCase , _UpperCamelCase ) and not line.startswith(' ' ) else {}
SCREAMING_SNAKE_CASE = padding_side
return tokenizer(
[line] , max_length=_UpperCamelCase , padding='max_length' if pad_to_max_length else None , truncation=_UpperCamelCase , return_tensors=_UpperCamelCase , add_special_tokens=_UpperCamelCase , **_UpperCamelCase , )
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any]=None , ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = input_ids.ne(_UpperCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Tuple="train" , snake_case__ : Optional[int]=None , snake_case__ : Any=None , snake_case__ : int=None , snake_case__ : Union[str, Any]="" , ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = Path(_a ).joinpath(type_path + '.source' )
SCREAMING_SNAKE_CASE = Path(_a ).joinpath(type_path + '.target' )
SCREAMING_SNAKE_CASE = self.get_char_lens(self.src_file )
SCREAMING_SNAKE_CASE = max_source_length
SCREAMING_SNAKE_CASE = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
SCREAMING_SNAKE_CASE = tokenizer
SCREAMING_SNAKE_CASE = prefix
if n_obs is not None:
SCREAMING_SNAKE_CASE = self.src_lens[:n_obs]
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = tgt_lang
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = index + 1 # linecache starts at 1
SCREAMING_SNAKE_CASE = self.prefix + linecache.getline(str(self.src_file ) , _a ).rstrip('\n' )
SCREAMING_SNAKE_CASE = linecache.getline(str(self.tgt_file ) , _a ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _a ) else self.tokenizer
)
SCREAMING_SNAKE_CASE = self.tokenizer.generator if isinstance(self.tokenizer , _a ) else self.tokenizer
SCREAMING_SNAKE_CASE = encode_line(_a , _a , self.max_source_length , 'right' )
SCREAMING_SNAKE_CASE = encode_line(_a , _a , self.max_target_length , 'right' )
SCREAMING_SNAKE_CASE = source_inputs["""input_ids"""].squeeze()
SCREAMING_SNAKE_CASE = target_inputs["""input_ids"""].squeeze()
SCREAMING_SNAKE_CASE = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase ( snake_case__ : int ):
"""simple docstring"""
return [len(_a ) for x in Path(_a ).open().readlines()]
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.stack([x['input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE = torch.stack([x['attention_mask'] for x in batch] )
SCREAMING_SNAKE_CASE = torch.stack([x['decoder_input_ids'] for x in batch] )
SCREAMING_SNAKE_CASE = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _a )
else self.tokenizer.pad_token_id
)
SCREAMING_SNAKE_CASE = trim_batch(_a , _a )
SCREAMING_SNAKE_CASE = trim_batch(_a , _a , attention_mask=_a )
SCREAMING_SNAKE_CASE = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
a_ : str = getLogger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : List[List] ) -> Any:
'''simple docstring'''
return list(itertools.chain.from_iterable(_UpperCamelCase ) )
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_git_info()
save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , 'git_log.json' ) )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=4 , **_UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
with open(_UpperCamelCase , 'w' ) as f:
json.dump(_UpperCamelCase , _UpperCamelCase , indent=_UpperCamelCase , **_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase ) as f:
return json.load(_UpperCamelCase )
def __lowerCAmelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = git.Repo(search_parent_directories=_UpperCamelCase )
SCREAMING_SNAKE_CASE = {
"""repo_id""": str(_UpperCamelCase ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
"""hostname""": str(socket.gethostname() ),
}
return repo_infos
def __lowerCAmelCase ( _UpperCamelCase : Callable , _UpperCamelCase : Iterable ) -> Tuple:
'''simple docstring'''
return list(map(_UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with open(_UpperCamelCase , 'wb' ) as f:
return pickle.dump(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
def remove_articles(_UpperCamelCase : Any ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _UpperCamelCase )
def white_space_fix(_UpperCamelCase : List[Any] ):
return " ".join(text.split() )
def remove_punc(_UpperCamelCase : Union[str, Any] ):
SCREAMING_SNAKE_CASE = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_UpperCamelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) )
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = normalize_answer(_UpperCamelCase ).split()
SCREAMING_SNAKE_CASE = normalize_answer(_UpperCamelCase ).split()
SCREAMING_SNAKE_CASE = Counter(_UpperCamelCase ) & Counter(_UpperCamelCase )
SCREAMING_SNAKE_CASE = sum(common.values() )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 1.0 * num_same / len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = (2 * precision * recall) / (precision + recall)
return fa
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
return normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for hypo, pred in zip(_UpperCamelCase , _UpperCamelCase ):
em += exact_match_score(_UpperCamelCase , _UpperCamelCase )
if len(_UpperCamelCase ) > 0:
em /= len(_UpperCamelCase )
return {"em": em}
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
return model_prefix.startswith('rag' )
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
SCREAMING_SNAKE_CASE = """dropout_rate"""
for p in extra_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if not hasattr(_UpperCamelCase , _UpperCamelCase ) and not hasattr(_UpperCamelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
continue
SCREAMING_SNAKE_CASE = p if hasattr(_UpperCamelCase , _UpperCamelCase ) else equivalent_param[p]
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
delattr(_UpperCamelCase , _UpperCamelCase )
return hparams, config
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 0 |
import os
import sys
import transformers
a_ : Tuple = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 706 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : int = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Optional[Any] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> str:
'''simple docstring'''
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 0 |
import math
import qiskit
def __lowerCAmelCase ( _UpperCamelCase : int = 1 , _UpperCamelCase : int = 1 , _UpperCamelCase : int = 1 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if (
isinstance(_lowercase , _lowercase )
or isinstance(_lowercase , _lowercase )
or isinstance(_lowercase , _lowercase )
):
raise TypeError('inputs must be integers.' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('inputs must be positive.' )
if (
(math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != carry_in)
):
raise ValueError('inputs must be exact integers.' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('inputs must be less or equal to 2.' )
# build registers
SCREAMING_SNAKE_CASE = qiskit.QuantumRegister(4 , 'qr' )
SCREAMING_SNAKE_CASE = qiskit.ClassicalRegister(2 , 'cr' )
# list the entries
SCREAMING_SNAKE_CASE = [input_a, input_a, carry_in]
SCREAMING_SNAKE_CASE = qiskit.QuantumCircuit(_lowercase , _lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowercase ) # measure the last two qbits
SCREAMING_SNAKE_CASE = qiskit.Aer.get_backend('aer_simulator' )
SCREAMING_SNAKE_CASE = qiskit.execute(_lowercase , _lowercase , shots=10_00 )
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 709 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
from math import isclose, sqrt
def __lowerCAmelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = point_y / 4 / point_x
SCREAMING_SNAKE_CASE = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
SCREAMING_SNAKE_CASE = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
SCREAMING_SNAKE_CASE = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaeneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
SCREAMING_SNAKE_CASE = outgoing_gradient**2 + 4
SCREAMING_SNAKE_CASE = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
SCREAMING_SNAKE_CASE = (point_y - outgoing_gradient * point_x) ** 2 - 1_00
SCREAMING_SNAKE_CASE = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
SCREAMING_SNAKE_CASE = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
SCREAMING_SNAKE_CASE = x_minus if isclose(UpperCAmelCase__ , UpperCAmelCase__ ) else x_plus
SCREAMING_SNAKE_CASE = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def __lowerCAmelCase ( _UpperCamelCase : float = 1.4 , _UpperCamelCase : float = -9.6 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = first_x_coord
SCREAMING_SNAKE_CASE = first_y_coord
SCREAMING_SNAKE_CASE = (10.1 - point_y) / (0.0 - point_x)
while not (-0.01 <= point_x <= 0.01 and point_y > 0):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = next_point(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCamelCase ( __UpperCAmelCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase_ , 'width_multiplier' ) )
class UpperCamelCase :
def __init__( self : int , snake_case__ : Optional[Any] , snake_case__ : Tuple=1_3 , snake_case__ : Dict=6_4 , snake_case__ : Dict=2 , snake_case__ : int=3 , snake_case__ : Any="swish" , snake_case__ : str=3 , snake_case__ : Union[str, Any]=3_2 , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=0.02 , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Tuple=1_0 , snake_case__ : Optional[Any]=None , snake_case__ : Any=0.25 , snake_case__ : Tuple=0.0 , snake_case__ : Union[str, Any]=0.0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = make_divisible(5_1_2 * width_multiplier , divisor=8 )
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = conv_kernel_size
SCREAMING_SNAKE_CASE = output_stride
SCREAMING_SNAKE_CASE = classifier_dropout_prob
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = width_multiplier
SCREAMING_SNAKE_CASE = ffn_dropout
SCREAMING_SNAKE_CASE = attn_dropout
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self : str ):
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : Tuple , snake_case__ : Any , snake_case__ : Any , snake_case__ : int , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : str , snake_case__ : str , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__UpperCamelCase =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaModelTester(self )
SCREAMING_SNAKE_CASE = MobileViTVaConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase ( self : int ):
"""simple docstring"""
pass
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def UpperCamelCase ( self : str ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Any ):
SCREAMING_SNAKE_CASE = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 5
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE = 2
for i in range(len(UpperCAmelCase_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCAmelCase_ )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = MobileViTVaModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : int ):
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]],
[[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]],
[[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]],
] , device=UpperCAmelCase_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = model.to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase_ , return_tensors='pt' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ , target_sizes=[(5_0, 6_0)] )
SCREAMING_SNAKE_CASE = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , UpperCAmelCase_ )
| 711 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : str , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = process
SCREAMING_SNAKE_CASE = params
def __len__( self : Tuple ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.dataset[i]
SCREAMING_SNAKE_CASE = self.process(UpperCamelCase__ , **self.params )
return processed
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = loader
SCREAMING_SNAKE_CASE = infer
SCREAMING_SNAKE_CASE = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __len__( self : int ):
"""simple docstring"""
return len(self.loader )
def __iter__( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE = next(self.iterator )
SCREAMING_SNAKE_CASE = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE = processed
else:
SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE = processed
SCREAMING_SNAKE_CASE = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : str , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = iter(self.loader )
SCREAMING_SNAKE_CASE = None
return self
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
if self.subiterator is None:
SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
SCREAMING_SNAKE_CASE = next(self.subiterator )
return processed
class UpperCamelCase ( UpperCamelCase_ ):
def __iter__( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = iter(self.loader )
return self
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE = self.loader_batch_item()
SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
SCREAMING_SNAKE_CASE = processed
else:
SCREAMING_SNAKE_CASE = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE = len(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE = observed_batch_size
SCREAMING_SNAKE_CASE = processed
SCREAMING_SNAKE_CASE = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE = self.loader_batch_item()
SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE = processed
SCREAMING_SNAKE_CASE = item.pop('is_last' )
accumulator.append(UpperCamelCase__ )
return accumulator
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : Optional[Any] , snake_case__ : Dataset , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = key
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.dataset[i][self.key]
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : List[Any] , snake_case__ : Dataset , snake_case__ : str , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = dataset
SCREAMING_SNAKE_CASE = keya
SCREAMING_SNAKE_CASE = keya
def __len__( self : List[str] ):
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 712 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 673 | 0 |
from __future__ import annotations
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 0 |
from scipy.stats import spearmanr
import datasets
a_ : Any = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n"
a_ : int = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n"
a_ : Tuple = R"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def UpperCamelCase ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Dict=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = spearmanr(snake_case__ , snake_case__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a_ : Optional[Any] = {
"vocab_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
},
"tokenizer_file": {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
),
"google/realm-orqa-nq-openqa": (
"https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-nq-reader": (
"https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-openqa": (
"https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
),
"google/realm-orqa-wq-reader": (
"https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
),
},
}
a_ : Dict = {
"google/realm-cc-news-pretrained-embedder": 512,
"google/realm-cc-news-pretrained-encoder": 512,
"google/realm-cc-news-pretrained-scorer": 512,
"google/realm-cc-news-pretrained-openqa": 512,
"google/realm-orqa-nq-openqa": 512,
"google/realm-orqa-nq-reader": 512,
"google/realm-orqa-wq-openqa": 512,
"google/realm-orqa-wq-reader": 512,
}
a_ : str = {
"google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
"google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
"google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-openqa": {"do_lower_case": True},
"google/realm-orqa-nq-reader": {"do_lower_case": True},
"google/realm-orqa-wq-openqa": {"do_lower_case": True},
"google/realm-orqa-wq-reader": {"do_lower_case": True},
}
class UpperCamelCase ( lowercase__ ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =RealmTokenizer
def __init__( self : List[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , snake_case__ : List[Any]=True , snake_case__ : List[str]="[UNK]" , snake_case__ : int="[SEP]" , snake_case__ : int="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : Dict="[MASK]" , snake_case__ : List[Any]=True , snake_case__ : Union[str, Any]=None , **snake_case__ : Tuple , ):
"""simple docstring"""
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(__lowercase , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**__lowercase )
SCREAMING_SNAKE_CASE = do_lower_case
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH
SCREAMING_SNAKE_CASE = text
SCREAMING_SNAKE_CASE = kwargs.pop('text_pair' , __lowercase )
SCREAMING_SNAKE_CASE = kwargs.pop('return_tensors' , __lowercase )
SCREAMING_SNAKE_CASE = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
SCREAMING_SNAKE_CASE = batch_text_pair[idx]
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase )
SCREAMING_SNAKE_CASE = encoded_candidates.get('input_ids' )
SCREAMING_SNAKE_CASE = encoded_candidates.get('attention_mask' )
SCREAMING_SNAKE_CASE = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
SCREAMING_SNAKE_CASE = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase , tensor_type=__lowercase )
def UpperCamelCase ( self : List[Any] , snake_case__ : Any , snake_case__ : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
| 715 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> int:
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise TypeError('only integers accepted as input' )
else:
SCREAMING_SNAKE_CASE = str(abs(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE = [list(_lowerCAmelCase ) for char in range(len(_lowerCAmelCase ) )]
for index in range(len(_lowerCAmelCase ) ):
num_transpositions[index].pop(_lowerCAmelCase )
return max(
int(''.join(list(_lowerCAmelCase ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 716 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
a_ : List[str] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 6_5536,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 6_5536,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8000,
'''sample_size''': 13_1072,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6000,
'''sample_size''': 6_5536,
},
}
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
return torch.atana(_UpperCamelCase , _UpperCamelCase ) / math.pi * 2
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.sin(t * math.pi / 2 ) ** 2
SCREAMING_SNAKE_CASE = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(_UpperCamelCase , _UpperCamelCase )
class UpperCamelCase ( UpperCamelCase__ ):
pass
class UpperCamelCase ( nn.Module ):
def __init__( self : Any , snake_case__ : Any ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = DiffusionAttnUnetaD(snake_case__ , n_attn_layers=4 )
SCREAMING_SNAKE_CASE = deepcopy(self.diffusion )
SCREAMING_SNAKE_CASE = torch.quasirandom.SobolEngine(1 , scramble=snake_case__ )
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]['url']
os.system(f"""wget {url} ./""" )
return f"""./{model_name}.ckpt"""
a_ : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
a_ : List[str] = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
a_ : List[Any] = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
a_ : Optional[int] = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
a_ : List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
a_ : Tuple = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(f"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Any:
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(_UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ):
return name.replace(_UpperCamelCase , _UpperCamelCase )
elif name.startswith(_UpperCamelCase ):
return [name.replace(_UpperCamelCase , _UpperCamelCase ) for v in value]
raise ValueError(f"""Attn error with {name}""" )
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=13 ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
SCREAMING_SNAKE_CASE = 0
if string.startswith('net.3.' ):
depth += 1
SCREAMING_SNAKE_CASE = string[6:]
elif string.startswith('net.' ):
SCREAMING_SNAKE_CASE = string[4:]
while string.startswith('main.7.' ):
depth += 1
SCREAMING_SNAKE_CASE = string[7:]
if string.startswith('main.' ):
SCREAMING_SNAKE_CASE = string[5:]
# mid block
if string[:2].isdigit():
SCREAMING_SNAKE_CASE = string[:2]
SCREAMING_SNAKE_CASE = string[2:]
else:
SCREAMING_SNAKE_CASE = string[0]
SCREAMING_SNAKE_CASE = string[1:]
if depth == max_depth:
SCREAMING_SNAKE_CASE = MID_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = 'mid_block'
elif depth > 0 and int(_UpperCamelCase ) < 7:
SCREAMING_SNAKE_CASE = DOWN_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f"""down_blocks.{depth}"""
elif depth > 0 and int(_UpperCamelCase ) > 7:
SCREAMING_SNAKE_CASE = UP_NUM_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
SCREAMING_SNAKE_CASE = DEPTH_0_TO_LAYER[layer_num]
SCREAMING_SNAKE_CASE = f"""up_blocks.{max_depth - 1}""" if int(_UpperCamelCase ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(f"""Naming error with {input_string} and string_left: {string_left}.""" )
SCREAMING_SNAKE_CASE = string_left[1:]
if "resnets" in new_layer:
SCREAMING_SNAKE_CASE = convert_resconv_naming(_UpperCamelCase )
elif "attentions" in new_layer:
SCREAMING_SNAKE_CASE = convert_attn_naming(_UpperCamelCase )
SCREAMING_SNAKE_CASE = new_string_left
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = prefix + '.' + new_layer + '.' + string_left
else:
SCREAMING_SNAKE_CASE = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
# up- and downsample layers, don't have trainable weights
continue
SCREAMING_SNAKE_CASE = rename(_UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = transform_conv_attns(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = v
return new_state_dict
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
if len(_UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
SCREAMING_SNAKE_CASE = v[:, :, 0]
else:
# bias
SCREAMING_SNAKE_CASE = v
else:
# qkv matrices
SCREAMING_SNAKE_CASE = v.shape[0]
SCREAMING_SNAKE_CASE = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
SCREAMING_SNAKE_CASE = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
SCREAMING_SNAKE_CASE = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
SCREAMING_SNAKE_CASE = download(_UpperCamelCase )
SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]['sample_rate']
SCREAMING_SNAKE_CASE = MODELS_MAP[model_name]['sample_size']
SCREAMING_SNAKE_CASE = Object()
SCREAMING_SNAKE_CASE = sample_size
SCREAMING_SNAKE_CASE = sample_rate
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = UNetaDModel(sample_size=_UpperCamelCase , sample_rate=_UpperCamelCase )
SCREAMING_SNAKE_CASE = diffusers_model.state_dict()
SCREAMING_SNAKE_CASE = DiffusionUncond(_UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=_UpperCamelCase )['state_dict'] )
SCREAMING_SNAKE_CASE = orig_model.diffusion_ema.eval()
SCREAMING_SNAKE_CASE = orig_model.state_dict()
SCREAMING_SNAKE_CASE = rename_orig_weights(_UpperCamelCase )
SCREAMING_SNAKE_CASE = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
SCREAMING_SNAKE_CASE = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(_UpperCamelCase ) == 0, f"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(_UpperCamelCase ) ), f"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f"""Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
SCREAMING_SNAKE_CASE = value.squeeze()
SCREAMING_SNAKE_CASE = value
diffusers_model.load_state_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 1_00
SCREAMING_SNAKE_CASE = 33
SCREAMING_SNAKE_CASE = IPNDMScheduler(num_train_timesteps=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.manual_seed(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.randn([1, 2, config.sample_size] , generator=_UpperCamelCase ).to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.linspace(1 , 0 , steps + 1 , device=_UpperCamelCase )[:-1]
SCREAMING_SNAKE_CASE = get_crash_schedule(_UpperCamelCase )
SCREAMING_SNAKE_CASE = DanceDiffusionPipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.manual_seed(33 )
SCREAMING_SNAKE_CASE = pipe(num_inference_steps=_UpperCamelCase , generator=_UpperCamelCase ).audios
SCREAMING_SNAKE_CASE = sampling.iplms_sample(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , {} )
SCREAMING_SNAKE_CASE = generated.clamp(-1 , 1 )
SCREAMING_SNAKE_CASE = (generated - audio).abs().sum()
SCREAMING_SNAKE_CASE = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , _UpperCamelCase )
print('Diff max' , _UpperCamelCase )
assert diff_max < 1e-3, f"""Diff max: {diff_max} is too much :-/"""
print(f"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
a_ : Any = parser.parse_args()
main(args)
| 717 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 0 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
a_ = logging.getLogger(__name__)
def __lowerCAmelCase ( _UpperCamelCase : str=2 , _UpperCamelCase : Any=3 , _UpperCamelCase : Optional[int]=16 , _UpperCamelCase : Optional[Any] = 10 , _UpperCamelCase : Tuple = 2 ) -> Any:
'''simple docstring'''
def get_dataset(_UpperCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(__lowerCAmelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
SCREAMING_SNAKE_CASE = get_dataset(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = get_dataset(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
SCREAMING_SNAKE_CASE = DataLoader(__lowerCAmelCase , shuffle=__lowerCAmelCase , batch_size=__lowerCAmelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any]=None ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for epoch in range(__lowerCAmelCase ):
# Train quickly
model.train()
for batch in dataloader:
SCREAMING_SNAKE_CASE = batch
SCREAMING_SNAKE_CASE = model(__lowerCAmelCase )
SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(__lowerCAmelCase , __lowerCAmelCase )
accelerator.backward(__lowerCAmelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class UpperCamelCase ( nn.Module ):
def __init__( self : Union[str, Any] ):
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) )
def UpperCamelCase ( self : str , snake_case__ : Optional[int] ):
"""simple docstring"""
return x * self.a + self.b
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=lowerCamelCase__ , automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ , 'initial' )
accelerator.save_state(lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase__ , 'checkpoint' )
accelerator.save_state(lowerCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(lowerCamelCase__ )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
SCREAMING_SNAKE_CASE = train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
# Train partially
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
accelerator.load_state(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE = train(2 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
(SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item()
SCREAMING_SNAKE_CASE = optimizer.state_dict()
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] )
SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() )
SCREAMING_SNAKE_CASE = Accelerator()
with self.assertRaises(lowerCamelCase__ ) as ve:
accelerator.register_for_checkpointing(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
SCREAMING_SNAKE_CASE = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(lowerCamelCase__ , step_size=1 , gamma=0.99 )
SCREAMING_SNAKE_CASE = dummy_dataloaders()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save initial
accelerator.save_state()
SCREAMING_SNAKE_CASE = scheduler.state_dict()
train(3 , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
self.assertNotEqual(lowerCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowerCamelCase__ , scheduler.state_dict() )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
SCREAMING_SNAKE_CASE = DummyModel()
SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=lowerCamelCase__ , total_limit=2 )
# Train baseline
SCREAMING_SNAKE_CASE = Accelerator(project_dir=lowerCamelCase__ , project_config=lowerCamelCase__ )
SCREAMING_SNAKE_CASE = accelerator.prepare(lowerCamelCase__ )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowerCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ["torchrun", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
a_ = "/tmp/accelerate/state_checkpointing"
a_ = DummyModel()
a_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
a_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
a_ , a_ = dummy_dataloaders()
a_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
a_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
a_ , a_ , a_ , a_ , a_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
a_ , a_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
a_ = group["params"][0].device
break
assert param_device.type == accelerator.device.type
a_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
a_ = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
a_ = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 0 |
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
a_ : Tuple = None
try:
import msvcrt
except ImportError:
a_ : List[Any] = None
try:
import fcntl
except ImportError:
a_ : List[Any] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a_ : Union[str, Any] = OSError
# Data
# ------------------------------------------------
a_ : str = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
a_ : Any = "3.0.12"
a_ : Union[str, Any] = None
def __lowerCAmelCase ( ) -> Optional[Any]:
'''simple docstring'''
global _logger
SCREAMING_SNAKE_CASE = _logger or logging.getLogger(__name__ )
return _logger
class UpperCamelCase ( __a ):
def __init__( self : Union[str, Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = lock_file
return None
def __str__( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = F"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class UpperCamelCase :
def __init__( self : Optional[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = lock
return None
def __enter__( self : int ):
"""simple docstring"""
return self.lock
def __exit__( self : int , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple ):
"""simple docstring"""
self.lock.release()
return None
class UpperCamelCase :
def __init__( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Dict=-1 , snake_case__ : int=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
SCREAMING_SNAKE_CASE = self.hash_filename_if_too_long(lowerCAmelCase_ , lowerCAmelCase_ )
# The path to the lock file.
SCREAMING_SNAKE_CASE = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
SCREAMING_SNAKE_CASE = None
# The default timeout value.
SCREAMING_SNAKE_CASE = timeout
# We use this lock primarily for the lock counter.
SCREAMING_SNAKE_CASE = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
SCREAMING_SNAKE_CASE = 0
return None
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return self._lock_file
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCamelCase ( self : str , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = float(lowerCAmelCase_ )
return None
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return self._lock_file_fd is not None
def UpperCamelCase ( self : str , snake_case__ : Dict=None , snake_case__ : Optional[int]=0.05 ):
"""simple docstring"""
if timeout is None:
SCREAMING_SNAKE_CASE = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
SCREAMING_SNAKE_CASE = id(self )
SCREAMING_SNAKE_CASE = self._lock_file
SCREAMING_SNAKE_CASE = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(lowerCAmelCase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
SCREAMING_SNAKE_CASE = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int]=False ):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
SCREAMING_SNAKE_CASE = id(self )
SCREAMING_SNAKE_CASE = self._lock_file
logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
SCREAMING_SNAKE_CASE = 0
logger().debug(F"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : List[Any] ):
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
self.release()
return None
def __del__( self : int ):
"""simple docstring"""
self.release(force=lowerCAmelCase_ )
return None
def UpperCamelCase ( self : Tuple , snake_case__ : Dict , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.path.basename(lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > max_length and max_length > 0:
SCREAMING_SNAKE_CASE = os.path.dirname(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE = str(hash(lowerCAmelCase_ ) )
SCREAMING_SNAKE_CASE = filename[: max_length - len(lowerCAmelCase_ ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
else:
return path
class UpperCamelCase ( __a ):
def __init__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Tuple=-1 , snake_case__ : int=None ):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(lowerCAmelCase_ , timeout=lowerCAmelCase_ , max_filename_length=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
SCREAMING_SNAKE_CASE = os.open(self._lock_file , lowerCAmelCase_ )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCAmelCase_ , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE = fd
return None
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._lock_file_fd
SCREAMING_SNAKE_CASE = None
msvcrt.locking(lowerCAmelCase_ , msvcrt.LK_UNLCK , 1 )
os.close(lowerCAmelCase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase ( __a ):
def __init__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any]=-1 , snake_case__ : int=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.statvfs(os.path.dirname(lowerCAmelCase_ ) ).f_namemax
super().__init__(lowerCAmelCase_ , timeout=lowerCAmelCase_ , max_filename_length=lowerCAmelCase_ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
SCREAMING_SNAKE_CASE = os.open(self._lock_file , lowerCAmelCase_ )
try:
fcntl.flock(lowerCAmelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCAmelCase_ )
else:
SCREAMING_SNAKE_CASE = fd
return None
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._lock_file_fd
SCREAMING_SNAKE_CASE = None
fcntl.flock(lowerCAmelCase_ , fcntl.LOCK_UN )
os.close(lowerCAmelCase_ )
return None
class UpperCamelCase ( __a ):
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
SCREAMING_SNAKE_CASE = os.open(self._lock_file , lowerCAmelCase_ )
except OSError:
pass
else:
SCREAMING_SNAKE_CASE = fd
return None
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
os.close(self._lock_file_fd )
SCREAMING_SNAKE_CASE = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
a_ : Optional[int] = None
if msvcrt:
a_ : List[str] = WindowsFileLock
elif fcntl:
a_ : List[str] = UnixFileLock
else:
a_ : Tuple = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 719 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10 ) -> str:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or n < 0:
raise ValueError('Invalid input' )
SCREAMING_SNAKE_CASE = 10**n
SCREAMING_SNAKE_CASE = 2_84_33 * (pow(2 , 7_83_04_57 , UpperCamelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 720 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in range(len(_UpperCamelCase ) ):
if lista[i] != lista[i]:
count += 1
SCREAMING_SNAKE_CASE = '_'
if count > 1:
return False
else:
return "".join(_UpperCamelCase )
def __lowerCAmelCase ( _UpperCamelCase : list[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
while True:
SCREAMING_SNAKE_CASE = ['$'] * len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = []
for i in range(len(_UpperCamelCase ) ):
for j in range(i + 1 , len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = compare_string(binary[i] , binary[j] )
if k is False:
SCREAMING_SNAKE_CASE = '*'
SCREAMING_SNAKE_CASE = '*'
temp.append('X' )
for i in range(len(_UpperCamelCase ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_UpperCamelCase ) == 0:
return pi
SCREAMING_SNAKE_CASE = list(set(_UpperCamelCase ) )
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Sequence[float] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for minterm in minterms:
SCREAMING_SNAKE_CASE = ''
for _ in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = str(minterm % 2 ) + string
minterm //= 2
temp.append(_UpperCamelCase )
return temp
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in range(len(_UpperCamelCase ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def __lowerCAmelCase ( _UpperCamelCase : list[list[int]] , _UpperCamelCase : list[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [0] * len(_UpperCamelCase )
for i in range(len(chart[0] ) ):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = -1
for j in range(len(_UpperCamelCase ) ):
if chart[j][i] == 1:
count += 1
SCREAMING_SNAKE_CASE = j
if count == 1:
SCREAMING_SNAKE_CASE = 1
for i in range(len(_UpperCamelCase ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = 0
temp.append(prime_implicants[i] )
while True:
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = 0
for i in range(len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = chart[i].count(1 )
if count_n > max_n:
SCREAMING_SNAKE_CASE = count_n
SCREAMING_SNAKE_CASE = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = 0
def __lowerCAmelCase ( _UpperCamelCase : list[str] , _UpperCamelCase : list[str] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [[0 for x in range(len(_UpperCamelCase ) )] for x in range(len(_UpperCamelCase ) )]
for i in range(len(_UpperCamelCase ) ):
SCREAMING_SNAKE_CASE = prime_implicants[i].count('_' )
for j in range(len(_UpperCamelCase ) ):
if is_for_table(prime_implicants[i] , binary[j] , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = 1
return chart
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = int(input('Enter the no. of variables\n' ) )
SCREAMING_SNAKE_CASE = [
float(_UpperCamelCase )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
SCREAMING_SNAKE_CASE = decimal_to_binary(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = check(_UpperCamelCase )
print('Prime Implicants are:' )
print(_UpperCamelCase )
SCREAMING_SNAKE_CASE = prime_implicant_chart(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = selection(_UpperCamelCase , _UpperCamelCase )
print('Essential Prime Implicants are:' )
print(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 721 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a_ : Any = logging.getLogger(__name__)
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
__UpperCamelCase =field(
default=__a , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase =field(
default=__a , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase =field(
default=__a , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
__UpperCamelCase =field(default=__a , metadata={"help": "Whether tp freeze the encoder."} )
__UpperCamelCase =field(default=__a , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
__UpperCamelCase =field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
__UpperCamelCase =field(
default=10_24 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase =field(
default=1_28 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
__UpperCamelCase =field(
default=1_42 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCamelCase =field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
__UpperCamelCase =field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
__UpperCamelCase =field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
__UpperCamelCase =field(default=__a , metadata={"help": "Source language id for translation."} )
__UpperCamelCase =field(default=__a , metadata={"help": "Target language id for translation."} )
__UpperCamelCase =field(default=__a , metadata={"help": "# num_beams to use for evaluation."} )
__UpperCamelCase =field(
default=__a , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
logger.info(f"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(f""" {key} = {metrics[key]}""" )
save_json(_UpperCamelCase , os.path.join(_UpperCamelCase , f"""{split}_results.json""" ) )
def __lowerCAmelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
check_output_dir(_UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
assert hasattr(_UpperCamelCase , _UpperCamelCase ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_UpperCamelCase , _UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_UpperCamelCase , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_UpperCamelCase , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
SCREAMING_SNAKE_CASE = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_UpperCamelCase , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_UpperCamelCase , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_UpperCamelCase )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
SCREAMING_SNAKE_CASE = SeqaSeqDataset
# Get datasets
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCamelCase , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCamelCase , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
SCREAMING_SNAKE_CASE = (
dataset_class(
_UpperCamelCase , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = (
build_compute_metrics_fn(data_args.task , _UpperCamelCase ) if training_args.predict_with_generate else None
)
SCREAMING_SNAKE_CASE = SeqaSeqTrainer(
model=_UpperCamelCase , args=_UpperCamelCase , data_args=_UpperCamelCase , train_dataset=_UpperCamelCase , eval_dataset=_UpperCamelCase , data_collator=SeqaSeqDataCollator(
_UpperCamelCase , _UpperCamelCase , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
SCREAMING_SNAKE_CASE = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
SCREAMING_SNAKE_CASE = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
SCREAMING_SNAKE_CASE = train_result.metrics
SCREAMING_SNAKE_CASE = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
SCREAMING_SNAKE_CASE = trainer.evaluate(metric_key_prefix='val' )
SCREAMING_SNAKE_CASE = data_args.n_val
SCREAMING_SNAKE_CASE = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.do_predict:
logger.info('*** Predict ***' )
SCREAMING_SNAKE_CASE = trainer.predict(test_dataset=_UpperCamelCase , metric_key_prefix='test' )
SCREAMING_SNAKE_CASE = test_output.metrics
SCREAMING_SNAKE_CASE = data_args.n_test
if trainer.is_world_process_zero():
SCREAMING_SNAKE_CASE = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _UpperCamelCase , training_args.output_dir )
all_metrics.update(_UpperCamelCase )
if training_args.predict_with_generate:
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
SCREAMING_SNAKE_CASE = lmap(str.strip , _UpperCamelCase )
write_txt_file(_UpperCamelCase , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_UpperCamelCase , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def __lowerCAmelCase ( _UpperCamelCase : List[str] ) -> List[str]:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 700 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =['image_processor', 'tokenizer']
__UpperCamelCase ='OwlViTImageProcessor'
__UpperCamelCase =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[int] , snake_case__ : Union[str, Any]=None , snake_case__ : Dict=None , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case__ , )
SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case__ , snake_case__ )
def __call__( self : Tuple , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Tuple=None , snake_case__ : Union[str, Any]="max_length" , snake_case__ : int="np" , **snake_case__ : Tuple ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(snake_case__ , snake_case__ ) or (isinstance(snake_case__ , snake_case__ ) and not isinstance(text[0] , snake_case__ )):
SCREAMING_SNAKE_CASE = [self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )]
elif isinstance(snake_case__ , snake_case__ ) and isinstance(text[0] , snake_case__ ):
SCREAMING_SNAKE_CASE = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE = max([len(snake_case__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(snake_case__ ) != max_num_queries:
SCREAMING_SNAKE_CASE = t + [' '] * (max_num_queries - len(snake_case__ ))
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , padding=snake_case__ , return_tensors=snake_case__ , **snake_case__ )
encodings.append(snake_case__ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
SCREAMING_SNAKE_CASE = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
SCREAMING_SNAKE_CASE = BatchEncoding()
SCREAMING_SNAKE_CASE = input_ids
SCREAMING_SNAKE_CASE = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE = BatchEncoding()
SCREAMING_SNAKE_CASE = self.image_processor(
snake_case__ , return_tensors=snake_case__ , **snake_case__ ).pixel_values
SCREAMING_SNAKE_CASE = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=snake_case__ , **snake_case__ )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**snake_case__ ) , tensor_type=snake_case__ )
def UpperCamelCase ( self : Any , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.image_processor.post_process(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Any , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Any , *snake_case__ : List[str] , **snake_case__ : Any ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Dict , *snake_case__ : List[Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Optional[int] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case__ , )
return self.image_processor_class
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case__ , )
return self.image_processor
| 701 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a_ : Optional[Any] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Union[str, Any] = ["YolosFeatureExtractor"]
a_ : Dict = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Any = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
a_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 0 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class UpperCamelCase ( a__ ):
__UpperCamelCase =42
__UpperCamelCase =42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class UpperCamelCase ( a__ ):
__UpperCamelCase =42
__UpperCamelCase =42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker | 703 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 0 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = num - 1
SCREAMING_SNAKE_CASE = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
SCREAMING_SNAKE_CASE = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE = i + 1
SCREAMING_SNAKE_CASE = (v**2) % num
return True
def __lowerCAmelCase ( _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
if num < 2:
return False
SCREAMING_SNAKE_CASE = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
1_01,
1_03,
1_07,
1_09,
1_13,
1_27,
1_31,
1_37,
1_39,
1_49,
1_51,
1_57,
1_63,
1_67,
1_73,
1_79,
1_81,
1_91,
1_93,
1_97,
1_99,
2_11,
2_23,
2_27,
2_29,
2_33,
2_39,
2_41,
2_51,
2_57,
2_63,
2_69,
2_71,
2_77,
2_81,
2_83,
2_93,
3_07,
3_11,
3_13,
3_17,
3_31,
3_37,
3_47,
3_49,
3_53,
3_59,
3_67,
3_73,
3_79,
3_83,
3_89,
3_97,
4_01,
4_09,
4_19,
4_21,
4_31,
4_33,
4_39,
4_43,
4_49,
4_57,
4_61,
4_63,
4_67,
4_79,
4_87,
4_91,
4_99,
5_03,
5_09,
5_21,
5_23,
5_41,
5_47,
5_57,
5_63,
5_69,
5_71,
5_77,
5_87,
5_93,
5_99,
6_01,
6_07,
6_13,
6_17,
6_19,
6_31,
6_41,
6_43,
6_47,
6_53,
6_59,
6_61,
6_73,
6_77,
6_83,
6_91,
7_01,
7_09,
7_19,
7_27,
7_33,
7_39,
7_43,
7_51,
7_57,
7_61,
7_69,
7_73,
7_87,
7_97,
8_09,
8_11,
8_21,
8_23,
8_27,
8_29,
8_39,
8_53,
8_57,
8_59,
8_63,
8_77,
8_81,
8_83,
8_87,
9_07,
9_11,
9_19,
9_29,
9_37,
9_41,
9_47,
9_53,
9_67,
9_71,
9_77,
9_83,
9_91,
9_97,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( _UpperCamelCase : int = 10_24 ) -> List[Any]:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ : int = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : Dict = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 0 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 5_0_0
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=snake_case__ ) as mock_head:
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 5_0_0
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE = GPTaTokenizerFast.from_pretrained('gpt2' )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=snake_case__ ) as mock_head:
SCREAMING_SNAKE_CASE = GPTaTokenizerFast.from_pretrained('gpt2' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase ( self : str ):
"""simple docstring"""
try:
SCREAMING_SNAKE_CASE = tempfile.mktemp()
with open(snake_case__ , 'wb' ) as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , snake_case__ )
SCREAMING_SNAKE_CASE = AlbertTokenizer.from_pretrained(snake_case__ )
finally:
os.remove(snake_case__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json' ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb' ) as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , snake_case__ )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json' )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
__UpperCamelCase =["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def UpperCamelCase ( cls : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(snake_case__ )
@classmethod
def UpperCamelCase ( cls : Optional[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer' )
except HTTPError:
pass
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'vocab.txt' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE = BertTokenizer(snake_case__ )
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(snake_case__ , repo_id='test-tokenizer' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'vocab.txt' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE = BertTokenizer(snake_case__ )
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
snake_case__ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=snake_case__ , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def UpperCamelCase ( self : int ):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'vocab.txt' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE = CustomTokenizer(snake_case__ )
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = os.path.join(snake_case__ , 'vocab.txt' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained(snake_case__ )
bert_tokenizer.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE = CustomTokenizerFast.from_pretrained(snake_case__ )
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast' )
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=snake_case__ , trust_remote_code=snake_case__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer' )
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Trie()
SCREAMING_SNAKE_CASE = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
self.assertEqual(snake_case__ , ['AB', 'C'] )
| 706 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : str , snake_case__ : str=1_3 , snake_case__ : Tuple=7 , snake_case__ : Tuple=True , snake_case__ : Tuple=True , snake_case__ : List[str]=False , snake_case__ : Any=True , snake_case__ : Union[str, Any]=9_9 , snake_case__ : Dict=3_2 , snake_case__ : Optional[Any]=5 , snake_case__ : Optional[Any]=4 , snake_case__ : Union[str, Any]=3_7 , snake_case__ : Tuple="gelu" , snake_case__ : Dict=0.1 , snake_case__ : Any=0.1 , snake_case__ : int=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : int=4 , snake_case__ : List[str]=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , use_stable_embedding=snake_case__ , )
def UpperCamelCase ( self : int , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = OpenLlamaForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , use_cache=snake_case__ , )
SCREAMING_SNAKE_CASE = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 )
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , encoder_hidden_states=snake_case__ , encoder_attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )['hidden_states'][0]
# select random slice
SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase =(
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = OpenLlamaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'single_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 'multi_label_classification'
SCREAMING_SNAKE_CASE = input_dict['input_ids']
SCREAMING_SNAKE_CASE = input_ids.ne(1 ).to(snake_case__ )
SCREAMING_SNAKE_CASE = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
SCREAMING_SNAKE_CASE = OpenLlamaForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = ids_tensor([1, 1_0] , config.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
original_model.to(snake_case__ )
original_model.eval()
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = original_model(snake_case__ ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
SCREAMING_SNAKE_CASE = {'type': scaling_type, 'factor': 10.0}
SCREAMING_SNAKE_CASE = OpenLlamaModel(snake_case__ )
scaled_model.to(snake_case__ )
scaled_model.eval()
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
SCREAMING_SNAKE_CASE = scaled_model(snake_case__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case__ , snake_case__ , atol=1E-5 ) )
| 673 | 0 |
import argparse
import json
from tqdm import tqdm
def __lowerCAmelCase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=lowercase_ , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=lowercase_ , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=lowercase_ , help='where to store parsed gold_data_path file' , )
SCREAMING_SNAKE_CASE = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
SCREAMING_SNAKE_CASE = json.load(lowercase_ )
for dpr_record in tqdm(lowercase_ ):
SCREAMING_SNAKE_CASE = dpr_record["question"]
SCREAMING_SNAKE_CASE = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(lowercase_ ) + '\n' )
if __name__ == "__main__":
main()
| 707 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="openai/whisper-base"
__UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__UpperCamelCase ="transcriber"
__UpperCamelCase =WhisperProcessor
__UpperCamelCase =WhisperForConditionalGeneration
__UpperCamelCase =["audio"]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Dict , snake_case__ : Tuple ):
"""simple docstring"""
return self.pre_processor(snake_case__ , return_tensors='pt' ).input_features
def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple ):
"""simple docstring"""
return self.model.generate(inputs=snake_case__ )
def UpperCamelCase ( self : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0]
| 673 | 0 |
import math
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : Tuple = 0.1 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(__UpperCamelCase )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a_ : Union[str, Any] = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a_ : List[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]=1_00 , _UpperCamelCase : str=" " ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = text.split(_UpperCamelCase )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )]
def __lowerCAmelCase ( _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(_UpperCamelCase ):
titles.append(title if title is not None else '' )
texts.append(_UpperCamelCase )
return {"title": titles, "text": texts}
def __lowerCAmelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=_UpperCamelCase , padding='longest' , return_tensors='pt' )['input_ids']
SCREAMING_SNAKE_CASE = ctx_encoder(input_ids.to(device=_UpperCamelCase ) , return_dict=_UpperCamelCase ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , ) -> Dict:
'''simple docstring'''
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
SCREAMING_SNAKE_CASE = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
SCREAMING_SNAKE_CASE = dataset.map(_UpperCamelCase , batched=_UpperCamelCase , num_proc=processing_args.num_proc )
# And compute the embeddings
SCREAMING_SNAKE_CASE = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=_UpperCamelCase )
SCREAMING_SNAKE_CASE = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
SCREAMING_SNAKE_CASE = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
SCREAMING_SNAKE_CASE = dataset.map(
partial(_UpperCamelCase , ctx_encoder=_UpperCamelCase , ctx_tokenizer=_UpperCamelCase ) , batched=_UpperCamelCase , batch_size=processing_args.batch_size , features=_UpperCamelCase , )
# And finally save your dataset
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(_UpperCamelCase )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
SCREAMING_SNAKE_CASE = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=_UpperCamelCase )
# And save the index
SCREAMING_SNAKE_CASE = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(_UpperCamelCase )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=str(Path(_lowerCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns \'title\' and \'text\'"} , )
__UpperCamelCase =field(
default=_lowerCAmelCase , metadata={"help": "Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'."} , )
__UpperCamelCase =field(
default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\'"} , )
__UpperCamelCase =field(
default="facebook/dpr-ctx_encoder-multiset-base" , metadata={
"help": (
"The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or"
" \'facebook/dpr-ctx_encoder-multiset-base\'"
)
} , )
__UpperCamelCase =field(
default=str(Path(_lowerCAmelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=_lowerCAmelCase , metadata={
"help": "The number of processes to use to split the documents into passages. Default is single process."
} , )
__UpperCamelCase =field(
default=16 , metadata={
"help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
} , )
@dataclass
class UpperCamelCase :
__UpperCamelCase =field(
default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , )
__UpperCamelCase =field(
default=1_28 , metadata={
"help": (
"The number of bi-directional links created for every new element during the HNSW index construction."
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a_ : str = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a_ : Optional[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a_ : str = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 709 |
import numpy as np
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Tuple = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class UpperCamelCase ( UpperCamelCase__ ):
__UpperCamelCase ="xmod"
def __init__( self : Optional[Any] , snake_case__ : Optional[int]=3_0_5_2_2 , snake_case__ : int=7_6_8 , snake_case__ : str=1_2 , snake_case__ : Dict=1_2 , snake_case__ : List[str]=3_0_7_2 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=5_1_2 , snake_case__ : int=2 , snake_case__ : str=0.02 , snake_case__ : Optional[int]=1E-12 , snake_case__ : int=1 , snake_case__ : Any=0 , snake_case__ : List[Any]=2 , snake_case__ : Union[str, Any]="absolute" , snake_case__ : List[Any]=True , snake_case__ : int=None , snake_case__ : Optional[Any]=False , snake_case__ : List[str]=2 , snake_case__ : Tuple=False , snake_case__ : int=True , snake_case__ : Optional[Any]=True , snake_case__ : int=("en_XX",) , snake_case__ : Dict=None , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = classifier_dropout
SCREAMING_SNAKE_CASE = pre_norm
SCREAMING_SNAKE_CASE = adapter_reduction_factor
SCREAMING_SNAKE_CASE = adapter_layer_norm
SCREAMING_SNAKE_CASE = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE = ln_before_adapter
SCREAMING_SNAKE_CASE = list(_a )
SCREAMING_SNAKE_CASE = default_language
class UpperCamelCase ( UpperCamelCase__ ):
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 710 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a_ : Any = logging.get_logger(__name__)
a_ : Dict = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="van"
def __init__( self : Optional[Any] , snake_case__ : Tuple=2_2_4 , snake_case__ : Dict=3 , snake_case__ : Union[str, Any]=[7, 3, 3, 3] , snake_case__ : str=[4, 2, 2, 2] , snake_case__ : Optional[Any]=[6_4, 1_2_8, 3_2_0, 5_1_2] , snake_case__ : Optional[Any]=[3, 3, 1_2, 3] , snake_case__ : Tuple=[8, 8, 4, 4] , snake_case__ : Any="gelu" , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1E-6 , snake_case__ : int=1E-2 , snake_case__ : Any=0.0 , snake_case__ : Tuple=0.0 , **snake_case__ : Any , ):
"""simple docstring"""
super().__init__(**snake_case__ )
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
| 673 | 0 |
a_ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a_ : Union[str, Any] = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
a_ : List[Any] = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> str:
'''simple docstring'''
assert len(str(__UpperCamelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
SCREAMING_SNAKE_CASE = year // 1_00
SCREAMING_SNAKE_CASE = (5 * (century % 4) + 2) % 7
SCREAMING_SNAKE_CASE = year % 1_00
SCREAMING_SNAKE_CASE = centurian % 12
SCREAMING_SNAKE_CASE = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
SCREAMING_SNAKE_CASE = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0)
else DOOMSDAY_LEAP[month - 1]
)
SCREAMING_SNAKE_CASE = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> list[str]:
'''simple docstring'''
return [sentence[i : i + ngram_size] for i in range(len(_UpperCamelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 673 | 0 |
from __future__ import annotations
from random import choice
def __lowerCAmelCase ( _UpperCamelCase : str ) -> List[str]:
'''simple docstring'''
return choice(lowerCamelCase_ )
def __lowerCAmelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = random_pivot(lowerCamelCase_ )
# partition based on pivot
# linear time
SCREAMING_SNAKE_CASE = [e for e in lst if e < pivot]
SCREAMING_SNAKE_CASE = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCamelCase_ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCamelCase_ ) < k - 1:
return kth_number(lowerCamelCase_ , k - len(lowerCamelCase_ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a_ : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : Any , **snake_case__ : Optional[int] ):
"""simple docstring"""
super().__init__(**snake_case__ )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(snake_case__ )
def __call__( self : List[Any] , snake_case__ : Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case__ : Union[str, List[str]] = None , **snake_case__ : Union[str, Any] , ):
"""simple docstring"""
if "text_queries" in kwargs:
SCREAMING_SNAKE_CASE = kwargs.pop('text_queries' )
if isinstance(snake_case__ , (str, Image.Image) ):
SCREAMING_SNAKE_CASE = {'image': image, 'candidate_labels': candidate_labels}
else:
SCREAMING_SNAKE_CASE = image
SCREAMING_SNAKE_CASE = super().__call__(snake_case__ , **snake_case__ )
return results
def UpperCamelCase ( self : Union[str, Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {}
if "threshold" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['threshold']
if "top_k" in kwargs:
SCREAMING_SNAKE_CASE = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self : List[Any] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE = inputs['candidate_labels']
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = candidate_labels.split(',' )
SCREAMING_SNAKE_CASE = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(snake_case__ ):
SCREAMING_SNAKE_CASE = self.tokenizer(snake_case__ , return_tensors=self.framework )
SCREAMING_SNAKE_CASE = self.image_processor(snake_case__ , return_tensors=self.framework )
yield {
"is_last": i == len(snake_case__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self : Any , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = model_inputs.pop('target_size' )
SCREAMING_SNAKE_CASE = model_inputs.pop('candidate_label' )
SCREAMING_SNAKE_CASE = model_inputs.pop('is_last' )
SCREAMING_SNAKE_CASE = self.model(**snake_case__ )
SCREAMING_SNAKE_CASE = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : str=0.1 , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
for model_output in model_outputs:
SCREAMING_SNAKE_CASE = model_output['candidate_label']
SCREAMING_SNAKE_CASE = BaseModelOutput(snake_case__ )
SCREAMING_SNAKE_CASE = self.image_processor.post_process_object_detection(
outputs=snake_case__ , threshold=snake_case__ , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
SCREAMING_SNAKE_CASE = outputs['scores'][index].item()
SCREAMING_SNAKE_CASE = self._get_bounding_box(outputs['boxes'][index][0] )
SCREAMING_SNAKE_CASE = {'score': score, 'label': label, 'box': box}
results.append(snake_case__ )
SCREAMING_SNAKE_CASE = sorted(snake_case__ , key=lambda snake_case__ : x["score"] , reverse=snake_case__ )
if top_k:
SCREAMING_SNAKE_CASE = results[:top_k]
return results
def UpperCamelCase ( self : List[Any] , snake_case__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = box.int().tolist()
SCREAMING_SNAKE_CASE = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
| 673 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def __lowerCAmelCase ( _UpperCamelCase : str ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = analyze_text(__snake_case )
SCREAMING_SNAKE_CASE = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
SCREAMING_SNAKE_CASE = sum(single_char_strings.values() )
# one length string
SCREAMING_SNAKE_CASE = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
SCREAMING_SNAKE_CASE = single_char_strings[ch]
SCREAMING_SNAKE_CASE = my_str / all_sum
my_fir_sum += prob * math.loga(__snake_case ) # entropy formula.
# print entropy
print(f"""{round(-1 * my_fir_sum ):.1f}""" )
# two len string
SCREAMING_SNAKE_CASE = sum(two_char_strings.values() )
SCREAMING_SNAKE_CASE = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
SCREAMING_SNAKE_CASE = cha + cha
if sequence in two_char_strings:
SCREAMING_SNAKE_CASE = two_char_strings[sequence]
SCREAMING_SNAKE_CASE = int(__snake_case ) / all_sum
my_sec_sum += prob * math.loga(__snake_case )
# print second entropy
print(f"""{round(-1 * my_sec_sum ):.1f}""" )
# print the difference between them
print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def __lowerCAmelCase ( _UpperCamelCase : str ) -> tuple[dict, dict]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Counter() # type: ignore
SCREAMING_SNAKE_CASE = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(__snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def __lowerCAmelCase ( ) -> str:
'''simple docstring'''
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 713 |
def __lowerCAmelCase ( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 2**power
SCREAMING_SNAKE_CASE = str(_UpperCamelCase )
SCREAMING_SNAKE_CASE = list(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
for i in list_num:
sum_of_num += int(_UpperCamelCase )
return sum_of_num
if __name__ == "__main__":
a_ : List[str] = int(input("Enter the power of 2: ").strip())
print("2 ^ ", power, " = ", 2**power)
a_ : int = solution(power)
print("Sum of the digits is: ", result)
| 673 | 0 |
import os
a_ : Union[str, Any] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
while index < len(a__ ) - 1:
SCREAMING_SNAKE_CASE = SYMBOLS[numerals[index]]
SCREAMING_SNAKE_CASE = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def __lowerCAmelCase ( _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = num // 10_00
numerals += m_count * "M"
num %= 10_00
SCREAMING_SNAKE_CASE = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
SCREAMING_SNAKE_CASE = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowerCAmelCase ( _UpperCamelCase : List[str] = "/p089_roman.txt" ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 0
with open(os.path.dirname(a__ ) + roman_numerals_filename ) as filea:
SCREAMING_SNAKE_CASE = filea.readlines()
for line in lines:
SCREAMING_SNAKE_CASE = line.strip()
SCREAMING_SNAKE_CASE = parse_roman_numerals(a__ )
SCREAMING_SNAKE_CASE = generate_roman_numerals(a__ )
savings += len(a__ ) - len(a__ )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 714 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase ="facebook/bart-large-mnli"
__UpperCamelCase =(
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
__UpperCamelCase ="text_classifier"
__UpperCamelCase =AutoTokenizer
__UpperCamelCase =AutoModelForSequenceClassification
__UpperCamelCase =["text", ["text"]]
__UpperCamelCase =["text"]
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
SCREAMING_SNAKE_CASE = int(snake_case__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(snake_case__ ) , [F"""This example is {label}""" for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 673 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def __lowerCAmelCase ( _UpperCamelCase : Iterable[str] , _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = iter(UpperCamelCase__ )
while True:
SCREAMING_SNAKE_CASE = tuple(itertools.islice(UpperCamelCase__ , UpperCamelCase__ ) )
if not chunk:
return
yield chunk
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
SCREAMING_SNAKE_CASE = ''
if len(UpperCamelCase__ ) < 2:
return dirty
for i in range(len(UpperCamelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCamelCase__ ) & 1:
clean += "X"
return clean
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
SCREAMING_SNAKE_CASE = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCamelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCamelCase__ )
return table
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = generate_table(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = prepare_input(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(table.index(UpperCamelCase__ ) , 5 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = generate_table(UpperCamelCase__ )
SCREAMING_SNAKE_CASE = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCamelCase__ , 2 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(table.index(UpperCamelCase__ ) , 5 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = divmod(table.index(UpperCamelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 715 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
a_ : str = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
a_ : int = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
a_ : str = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
a_ : List[Any] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
a_ : Any = "allenai"
def __lowerCAmelCase ( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = dict((re.sub(R'@@$' , '' , _UpperCamelCase ), v) if k.endswith('@@' ) else (re.sub(R'$' , '</w>' , _UpperCamelCase ), v) for k, v in d.items() )
SCREAMING_SNAKE_CASE = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[f"""{k}</w>"""]
SCREAMING_SNAKE_CASE = d[k] # restore
return da
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
assert os.path.exists(_UpperCamelCase )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
print(f"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
SCREAMING_SNAKE_CASE = cls.hub_models()
SCREAMING_SNAKE_CASE = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
SCREAMING_SNAKE_CASE = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"""using checkpoint {checkpoint_file}""" )
SCREAMING_SNAKE_CASE = hub_utils.from_pretrained(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , archive_map=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vars(chkpt['args']['model'] )
SCREAMING_SNAKE_CASE = args['source_lang']
SCREAMING_SNAKE_CASE = args['target_lang']
SCREAMING_SNAKE_CASE = dirname(_UpperCamelCase )
SCREAMING_SNAKE_CASE = basename(_UpperCamelCase )
# dicts
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{src_lang}.txt""" )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , f"""dict.{tgt_lang}.txt""" )
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(src_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-src.json' )
print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
SCREAMING_SNAKE_CASE = True
for k in src_vocab.keys():
if not k.islower():
SCREAMING_SNAKE_CASE = False
break
SCREAMING_SNAKE_CASE = Dictionary.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = rewrite_dict_keys(tgt_dict.indices )
SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'vocab-tgt.json' )
print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# merges_file (bpecodes)
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
if os.path.exists(_UpperCamelCase ):
break
with open(_UpperCamelCase , encoding='utf-8' ) as fin:
SCREAMING_SNAKE_CASE = fin.read()
SCREAMING_SNAKE_CASE = re.sub(R' \d+$' , '' , _UpperCamelCase , 0 , re.M ) # remove frequency number
print(f"""Generating {merges_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_UpperCamelCase )
# model config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args['bpe']}"""
assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args['tokenizer']}"""
SCREAMING_SNAKE_CASE = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
SCREAMING_SNAKE_CASE = 5
SCREAMING_SNAKE_CASE = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
SCREAMING_SNAKE_CASE = best_score_hparams[model_dir]['length_penalty']
else:
SCREAMING_SNAKE_CASE = 1.0
print(f"""Generating {fsmt_model_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# tokenizer config
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = {
'langs': [src_lang, tgt_lang],
'model_max_length': 10_24,
'do_lower_case': do_lower_case,
}
print(f"""Generating {fsmt_tokenizer_config_file}""" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_UpperCamelCase , ensure_ascii=_UpperCamelCase , indent=_UpperCamelCase ) )
# model
SCREAMING_SNAKE_CASE = chkpt['models'][0]
SCREAMING_SNAKE_CASE = model.state_dict()
# rename keys to start with 'model.'
SCREAMING_SNAKE_CASE = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
SCREAMING_SNAKE_CASE = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTConfig.from_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration(_UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
# save
SCREAMING_SNAKE_CASE = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(_UpperCamelCase , _UpperCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(f"""cd {data_root}""" )
print(f"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
a_ : int = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 673 | 0 |
import heapq
import sys
import numpy as np
a_ : List[Any] = tuple[int, int]
class UpperCamelCase :
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : int ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Dict , snake_case__ : Tuple , snake_case__ : Any ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(__SCREAMING_SNAKE_CASE )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : List[Any] , snake_case__ : int ):
"""simple docstring"""
if item in self.set:
self.set.remove(__SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(__SCREAMING_SNAKE_CASE )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCAmelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCAmelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
return consistent_heuristic(_UpperCAmelCase , _UpperCAmelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str ) -> int:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCAmelCase , _UpperCAmelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCAmelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : str ) -> int:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , ) -> int:
'''simple docstring'''
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase , key(_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCAmelCase ):
if key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
def __lowerCAmelCase ( ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : List[Any] = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : str = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Any = blocks_blk
# hyper parameters
a_ : Union[str, Any] = 1
a_ : List[Any] = 1
a_ : List[Any] = 20
a_ : List[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : Union[str, Any] = (0, 0)
a_ : Tuple = (n - 1, n - 1)
a_ : Optional[int] = 1
def __lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_anchor.append(_UpperCAmelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 716 |
import random
def __lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : float , _UpperCamelCase : bool = False ) -> dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {i: [] for i in range(_UpperCamelCase )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(_UpperCamelCase )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(_UpperCamelCase ):
for j in range(i + 1 , _UpperCamelCase ):
if random.random() < probability:
graph[i].append(_UpperCamelCase )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(_UpperCamelCase )
return graph
def __lowerCAmelCase ( _UpperCamelCase : int ) -> dict:
'''simple docstring'''
return {
i: [j for j in range(_UpperCamelCase ) if i != j] for i in range(_UpperCamelCase )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 673 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class UpperCamelCase ( UpperCamelCase_ ):
def __init__( self : int , snake_case__ : Dict = 1_0_1 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = length
def __len__( self : List[Any] ):
"""simple docstring"""
return self.length
def __getitem__( self : Union[str, Any] , snake_case__ : Tuple ):
"""simple docstring"""
return i
class UpperCamelCase :
def __call__( self : Dict , snake_case__ : List[str] ):
"""simple docstring"""
return {"input_ids": torch.tensor(__a ), "labels": torch.tensor(__a )}
class UpperCamelCase ( nn.Module ):
def __init__( self : Any ):
"""simple docstring"""
super().__init__()
# Add some (unused) params otherwise DDP will complain.
SCREAMING_SNAKE_CASE = nn.Linear(1_2_0 , 8_0 )
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Union[str, Any]=None ):
"""simple docstring"""
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class UpperCamelCase ( UpperCamelCase_ ):
@require_torch_neuroncore
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = F"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""--output_dir {output_dir}""".split()
SCREAMING_SNAKE_CASE = ['torchrun'] + distributed_args + args
execute_subprocess_async(__a , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class UpperCamelCase ( UpperCamelCase_ ):
@require_torch_multi_gpu
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = F"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
SCREAMING_SNAKE_CASE = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE = F"""--output_dir {output_dir}""".split()
SCREAMING_SNAKE_CASE = ['torchrun'] + distributed_args + args
execute_subprocess_async(__a , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
a_ : Optional[int] = HfArgumentParser((TrainingArguments,))
a_ : Union[str, Any] = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
F"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
a_ : Dict = DummyDataset(dataset_length)
def __lowerCAmelCase ( _UpperCamelCase : EvalPrediction ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = list(range(len(_UpperCamelCase ) ) )
SCREAMING_SNAKE_CASE = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
f"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
a_ : Any = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
a_ : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a_ : List[Any] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a_ : List[Any] = 2
a_ : Tuple = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a_ : List[str] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a_ : Optional[Any] = None
| 717 |
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any]=1_3 , snake_case__ : Union[str, Any]=7 , snake_case__ : List[str]=True , snake_case__ : Any=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : str=3_2 , snake_case__ : Dict=5 , snake_case__ : str=4 , snake_case__ : int=3_7 , snake_case__ : Union[str, Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Dict=0.1 , snake_case__ : Optional[Any]=5_1_2 , snake_case__ : List[Any]=1_6 , snake_case__ : str=2 , snake_case__ : int=0.02 , snake_case__ : List[str]=3 , snake_case__ : Dict=4 , snake_case__ : str=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self : List[Any] , snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_choices
SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE = model(
snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = config_and_inputs
SCREAMING_SNAKE_CASE = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =(
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCamelCase =(
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case__ )
@slow
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1E-4 ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'the [MASK] of Belgium is Brussels'
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained('uw-madison/nystromformer-512' )
SCREAMING_SNAKE_CASE = tokenizer(snake_case__ , return_tensors='pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(snake_case__ ) , 'capital' )
| 673 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __lowerCAmelCase ( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , _lowercase ).groups()[0]
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : Optional[int] , snake_case__ : Tuple=None , snake_case__ : List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = file_names
SCREAMING_SNAKE_CASE = image_transform
SCREAMING_SNAKE_CASE = label_to_id
def __len__( self : Dict ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self : Union[str, Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.file_names[idx]
SCREAMING_SNAKE_CASE = PIL.Image.open(__A )
SCREAMING_SNAKE_CASE = raw_image.convert('RGB' )
if self.image_transform is not None:
SCREAMING_SNAKE_CASE = self.image_transform(__A )
SCREAMING_SNAKE_CASE = extract_label(__A )
if self.label_to_id is not None:
SCREAMING_SNAKE_CASE = self.label_to_id[label]
return {"image": image, "label": label}
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[Any]:
'''simple docstring'''
if args.with_tracking:
SCREAMING_SNAKE_CASE = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
SCREAMING_SNAKE_CASE = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE = config['lr']
SCREAMING_SNAKE_CASE = int(config['num_epochs'] )
SCREAMING_SNAKE_CASE = int(config['seed'] )
SCREAMING_SNAKE_CASE = int(config['batch_size'] )
SCREAMING_SNAKE_CASE = config['image_size']
if not isinstance(_lowercase , (list, tuple) ):
SCREAMING_SNAKE_CASE = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
SCREAMING_SNAKE_CASE = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
SCREAMING_SNAKE_CASE = int(args.checkpointing_steps )
else:
raise ValueError(
f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
SCREAMING_SNAKE_CASE = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
SCREAMING_SNAKE_CASE = os.path.split(_lowercase )[-1].split('.' )[0]
accelerator.init_trackers(_lowercase , _lowercase )
# Grab all the image filenames
SCREAMING_SNAKE_CASE = [os.path.join(args.data_dir , _lowercase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
SCREAMING_SNAKE_CASE = [extract_label(_lowercase ) for fname in file_names]
SCREAMING_SNAKE_CASE = list(set(_lowercase ) )
id_to_label.sort()
SCREAMING_SNAKE_CASE = {lbl: i for i, lbl in enumerate(_lowercase )}
# Set the seed before splitting the data.
np.random.seed(_lowercase )
torch.manual_seed(_lowercase )
torch.cuda.manual_seed_all(_lowercase )
# Split our filenames between train and validation
SCREAMING_SNAKE_CASE = np.random.permutation(len(_lowercase ) )
SCREAMING_SNAKE_CASE = int(0.8 * len(_lowercase ) )
SCREAMING_SNAKE_CASE = random_perm[:cut]
SCREAMING_SNAKE_CASE = random_perm[cut:]
# For training we use a simple RandomResizedCrop
SCREAMING_SNAKE_CASE = Compose([RandomResizedCrop(_lowercase , scale=(0.5, 1.0) ), ToTensor()] )
SCREAMING_SNAKE_CASE = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_lowercase , label_to_id=_lowercase )
# For evaluation, we use a deterministic Resize
SCREAMING_SNAKE_CASE = Compose([Resize(_lowercase ), ToTensor()] )
SCREAMING_SNAKE_CASE = PetsDataset([file_names[i] for i in eval_split] , image_transform=_lowercase , label_to_id=_lowercase )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
SCREAMING_SNAKE_CASE = DataLoader(_lowercase , shuffle=_lowercase , batch_size=_lowercase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE = create_model('resnet50d' , pretrained=_lowercase , num_classes=len(_lowercase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
SCREAMING_SNAKE_CASE = False
for param in model.get_classifier().parameters():
SCREAMING_SNAKE_CASE = True
# We normalize the batches of images to be a bit faster.
SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
SCREAMING_SNAKE_CASE = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
SCREAMING_SNAKE_CASE = OneCycleLR(optimizer=_lowercase , max_lr=_lowercase , epochs=_lowercase , steps_per_epoch=len(_lowercase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE = 0
# We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" )
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
SCREAMING_SNAKE_CASE = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
SCREAMING_SNAKE_CASE = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
SCREAMING_SNAKE_CASE = os.path.splitext(_lowercase )[0]
if "epoch" in training_difference:
SCREAMING_SNAKE_CASE = int(training_difference.replace('epoch_' , '' ) ) + 1
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = int(training_difference.replace('step_' , '' ) )
SCREAMING_SNAKE_CASE = resume_step // len(_lowercase )
resume_step -= starting_epoch * len(_lowercase )
# Now we train the model
for epoch in range(_lowercase , _lowercase ):
model.train()
if args.with_tracking:
SCREAMING_SNAKE_CASE = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
SCREAMING_SNAKE_CASE = accelerator.skip_first_batches(_lowercase , _lowercase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
SCREAMING_SNAKE_CASE = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE = (batch['image'] - mean) / std
SCREAMING_SNAKE_CASE = model(_lowercase )
SCREAMING_SNAKE_CASE = torch.nn.functional.cross_entropy(_lowercase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE = f"""step_{overall_step}"""
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
SCREAMING_SNAKE_CASE = {k: v.to(accelerator.device ) for k, v in batch.items()}
SCREAMING_SNAKE_CASE = (batch['image'] - mean) / std
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(_lowercase )
SCREAMING_SNAKE_CASE = outputs.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = accelerator.gather_for_metrics((predictions, batch['label']) )
SCREAMING_SNAKE_CASE = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
SCREAMING_SNAKE_CASE = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}: {1_00 * eval_metric:.2f}""" )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_lowercase ),
'epoch': epoch,
} , step=_lowercase , )
if checkpointing_steps == "epoch":
SCREAMING_SNAKE_CASE = f"""epoch_{epoch}"""
if args.output_dir is not None:
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _lowercase )
accelerator.save_state(_lowercase )
if args.with_tracking:
accelerator.end_training()
def __lowerCAmelCase ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_lowercase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_lowercase , default=_lowercase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_lowercase , default=_lowercase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_lowercase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_lowercase , default=_lowercase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_lowercase , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( __snake_case , unittest.TestCase ):
__UpperCamelCase =LDMTextToImagePipeline
__UpperCamelCase =TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
__UpperCamelCase =PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
__UpperCamelCase =TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCamelCase =False
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=(3_2, 6_4) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(_lowercase )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def UpperCamelCase ( self : Any , snake_case__ : List[str] , snake_case__ : List[str]=0 ):
"""simple docstring"""
if str(_lowercase ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(_lowercase )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(_lowercase )
SCREAMING_SNAKE_CASE = pipe(**_lowercase ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_6, 1_6, 3)
SCREAMING_SNAKE_CASE = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Optional[int]=torch.floataa , snake_case__ : int=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE = np.random.RandomState(_lowercase ).standard_normal((1, 4, 3_2, 3_2) )
SCREAMING_SNAKE_CASE = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE = self.get_inputs(_lowercase )
SCREAMING_SNAKE_CASE = pipe(**_lowercase ).images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_5_6, 2_5_6, 3)
SCREAMING_SNAKE_CASE = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : Optional[int] , snake_case__ : Any , snake_case__ : Dict=torch.floataa , snake_case__ : Union[str, Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.manual_seed(_lowercase )
SCREAMING_SNAKE_CASE = np.random.RandomState(_lowercase ).standard_normal((1, 4, 3_2, 3_2) )
SCREAMING_SNAKE_CASE = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 5_0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE = self.get_inputs(_lowercase )
SCREAMING_SNAKE_CASE = pipe(**_lowercase ).images[0]
SCREAMING_SNAKE_CASE = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
SCREAMING_SNAKE_CASE = np.abs(expected_image - image ).max()
assert max_diff < 1E-3
| 719 |
import heapq
import sys
import numpy as np
a_ : Optional[int] = tuple[int, int]
class UpperCamelCase :
def __init__( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if not self.empty():
return self.elements[0][0]
else:
return float('inf' )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return len(self.elements ) == 0
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ):
"""simple docstring"""
if item not in self.set:
heapq.heappush(self.elements , (priority, item) )
self.set.add(snake_case__ )
else:
# update
# print("update", item)
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pri, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
if item in self.set:
self.set.remove(snake_case__ )
SCREAMING_SNAKE_CASE = []
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
while x != item:
temp.append((pro, x) )
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
return self.elements[0][1]
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = heapq.heappop(self.elements )
self.set.remove(snake_case__ )
return (priority, item)
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
SCREAMING_SNAKE_CASE = np.array(_UpperCamelCase )
return np.linalg.norm(a - b )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Dict:
'''simple docstring'''
return consistent_heuristic(_UpperCamelCase , _UpperCamelCase ) // t
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos ) -> Optional[int]:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : int , _UpperCamelCase : TPos , _UpperCamelCase : dict[TPos, float] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = g_function[start] + Wa * heuristics[i](_UpperCamelCase , _UpperCamelCase )
return ans
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = np.chararray((n, n) )
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
SCREAMING_SNAKE_CASE = '*'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (j, (n - 1) - i) in blocks:
SCREAMING_SNAKE_CASE = '#'
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = x
# print(x)
SCREAMING_SNAKE_CASE = '-'
SCREAMING_SNAKE_CASE = back_pointer[x]
SCREAMING_SNAKE_CASE = '-'
for i in range(_UpperCamelCase ):
for j in range(_UpperCamelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=' ' )
print('<-- End position' , end=' ' )
else:
print(grid[i][j] , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
print('PATH TAKEN BY THE ALGORITHM IS:-' )
SCREAMING_SNAKE_CASE = back_pointer[goal]
while x != start:
print(_UpperCamelCase , end=' ' )
SCREAMING_SNAKE_CASE = back_pointer[x]
print(_UpperCamelCase )
sys.exit()
def __lowerCAmelCase ( _UpperCamelCase : TPos ) -> Any:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def __lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , ) -> List[Any]:
'''simple docstring'''
for itera in range(_UpperCamelCase ):
open_list[itera].remove_element(_UpperCamelCase )
# print("s", s)
# print("j", j)
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = s
SCREAMING_SNAKE_CASE = (x - 1, y)
SCREAMING_SNAKE_CASE = (x + 1, y)
SCREAMING_SNAKE_CASE = (x, y + 1)
SCREAMING_SNAKE_CASE = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCamelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCamelCase )
SCREAMING_SNAKE_CASE = -1
SCREAMING_SNAKE_CASE = float('inf' )
if valid(_UpperCamelCase ) and g_function[neighbours] > g_function[s] + 1:
SCREAMING_SNAKE_CASE = g_function[s] + 1
SCREAMING_SNAKE_CASE = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCamelCase , key(_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCamelCase ):
if key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) <= Wa * key(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase ):
open_list[j].put(
_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
def __lowerCAmelCase ( ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
a_ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
a_ : List[str] = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
a_ : Union[str, Any] = make_common_ground()
a_ : Tuple = blocks_blk
# hyper parameters
a_ : Any = 1
a_ : List[str] = 1
a_ : Union[str, Any] = 20
a_ : Optional[Any] = 3 # one consistent and two other inconsistent
# start and end destination
a_ : int = (0, 0)
a_ : Optional[int] = (n - 1, n - 1)
a_ : Union[str, Any] = 1
def __lowerCAmelCase ( _UpperCamelCase : TPos , _UpperCamelCase : TPos , _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {start: 0, goal: float('inf' )}
SCREAMING_SNAKE_CASE = {start: -1, goal: -1}
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = set()
for i in range(_UpperCamelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCamelCase , key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
while open_list[0].minkey() < float('inf' ):
for i in range(1 , _UpperCamelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = open_list[i].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_inad.append(_UpperCamelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float('inf' ):
do_something(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = open_list[0].top_show()
visited.add(_UpperCamelCase )
expand_state(
_UpperCamelCase , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
close_list_anchor.append(_UpperCamelCase )
print('No path found to goal' )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCamelCase ):
if (j, i) in blocks:
print('#' , end=' ' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('*' , end=' ' )
else:
print('-' , end=' ' )
else:
print('*' , end=' ' )
if (j, i) == (n - 1, n - 1):
print('<-- End position' , end=' ' )
print()
print('^' )
print('Start position' )
print()
print('# is an obstacle' )
print('- is the path taken by algorithm' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 673 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a_ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =PegasusTokenizer
__UpperCamelCase =PegasusTokenizerFast
__UpperCamelCase =True
__UpperCamelCase =True
def UpperCamelCase ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = PegasusTokenizer(A__ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def UpperCamelCase ( self : List[str] , **snake_case__ : Tuple ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self : int , snake_case__ : List[Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = """</s>"""
SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__ ) , A__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__ ) , A__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(A__ ) , 1_1_0_3 )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
SCREAMING_SNAKE_CASE = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
SCREAMING_SNAKE_CASE = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
SCREAMING_SNAKE_CASE = """To ensure a smooth flow of bank resolutions."""
SCREAMING_SNAKE_CASE = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
SCREAMING_SNAKE_CASE = tokenizer([raw_input_str] , return_tensors=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ["""This is going to be way too long.""" * 1_5_0, """short example"""]
SCREAMING_SNAKE_CASE = ["""not super long but more than 5 tokens""", """tiny"""]
SCREAMING_SNAKE_CASE = self._large_tokenizer(A__ , padding=A__ , truncation=A__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = self._large_tokenizer(
text_target=A__ , max_length=5 , padding=A__ , truncation=A__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A__ ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {"""input_ids""": [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =PegasusTokenizer
__UpperCamelCase =PegasusTokenizerFast
__UpperCamelCase =True
__UpperCamelCase =True
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE = PegasusTokenizer(A__ , offset=0 , mask_token_sent=A__ , mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def UpperCamelCase ( self : str , **snake_case__ : Optional[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self : str , snake_case__ : Optional[Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
SCREAMING_SNAKE_CASE = rust_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
SCREAMING_SNAKE_CASE = py_tokenizer([raw_input_str] , return_tensors=A__ , add_special_tokens=A__ ).input_ids[0]
self.assertListEqual(A__ , A__ )
@require_torch
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ["""This is going to be way too long.""" * 1_0_0_0, """short example"""]
SCREAMING_SNAKE_CASE = ["""not super long but more than 5 tokens""", """tiny"""]
SCREAMING_SNAKE_CASE = self._large_tokenizer(A__ , padding=A__ , truncation=A__ , return_tensors='pt' )
SCREAMING_SNAKE_CASE = self._large_tokenizer(
text_target=A__ , max_length=5 , padding=A__ , truncation=A__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A__ ) == 2 # input_ids, attention_mask.
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
SCREAMING_SNAKE_CASE = self._large_tokenizer(A__ ).input_ids
self.assertListEqual(
A__ , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 720 |
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a_ : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
a_ : str = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __lowerCAmelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Any=8 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
SCREAMING_SNAKE_CASE = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any]=5_12 , _UpperCamelCase : Union[str, Any]=5_12 ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
SCREAMING_SNAKE_CASE = np.array(pil_image.convert('RGB' ) )
SCREAMING_SNAKE_CASE = arr.astype(np.floataa ) / 1_27.5 - 1
SCREAMING_SNAKE_CASE = np.transpose(_UpperCamelCase , [2, 0, 1] )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).unsqueeze(0 )
return image
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self : int , snake_case__ : UNetaDConditionModel , snake_case__ : DDPMScheduler , snake_case__ : VQModel , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=snake_case__ , scheduler=snake_case__ , movq=snake_case__ , )
SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCamelCase ( self : Any , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = min(int(num_inference_steps * strength ) , snake_case__ )
SCREAMING_SNAKE_CASE = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase ( self : List[str] , snake_case__ : Dict , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : str=None ):
"""simple docstring"""
if not isinstance(snake_case__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(snake_case__ )}""" )
SCREAMING_SNAKE_CASE = image.to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt
if image.shape[1] == 4:
SCREAMING_SNAKE_CASE = image
else:
if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(snake_case__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ )
]
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
else:
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ ).latent_dist.sample(snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.config.scaling_factor * init_latents
SCREAMING_SNAKE_CASE = torch.cat([init_latents] , dim=0 )
SCREAMING_SNAKE_CASE = init_latents.shape
SCREAMING_SNAKE_CASE = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ )
# get latents
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = init_latents
return latents
def UpperCamelCase ( self : int , snake_case__ : List[str]=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : Optional[int]=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
SCREAMING_SNAKE_CASE = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=snake_case__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = cpu_offload_with_hook(snake_case__ , snake_case__ , prev_module_hook=snake_case__ )
# We'll offload the last model manually.
SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case__ )
def __call__( self : str , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , snake_case__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , snake_case__ : int = 5_1_2 , snake_case__ : int = 5_1_2 , snake_case__ : int = 1_0_0 , snake_case__ : float = 4.0 , snake_case__ : float = 0.3 , snake_case__ : int = 1 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._execution_device
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = image_embeds.shape[0]
if isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = torch.cat(snake_case__ , dim=0 )
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(snake_case__ , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case__ )
if not isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE = [image]
if not all(isinstance(snake_case__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(snake_case__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
SCREAMING_SNAKE_CASE = torch.cat([prepare_image(snake_case__ , snake_case__ , snake_case__ ) for i in image] , dim=0 )
SCREAMING_SNAKE_CASE = image.to(dtype=image_embeds.dtype , device=snake_case__ )
SCREAMING_SNAKE_CASE = self.movq.encode(snake_case__ )['latents']
SCREAMING_SNAKE_CASE = latents.repeat_interleave(snake_case__ , dim=0 )
self.scheduler.set_timesteps(snake_case__ , device=snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_timesteps(snake_case__ , snake_case__ , snake_case__ )
SCREAMING_SNAKE_CASE = timesteps[:1].repeat(batch_size * num_images_per_prompt )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = downscale_height_and_width(snake_case__ , snake_case__ , self.movq_scale_factor )
SCREAMING_SNAKE_CASE = self.prepare_latents(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , image_embeds.dtype , snake_case__ , snake_case__ )
for i, t in enumerate(self.progress_bar(snake_case__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = {'image_embeds': image_embeds}
SCREAMING_SNAKE_CASE = self.unet(
sample=snake_case__ , timestep=snake_case__ , encoder_hidden_states=snake_case__ , added_cond_kwargs=snake_case__ , return_dict=snake_case__ , )[0]
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(
snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ , )[0]
# post-processing
SCREAMING_SNAKE_CASE = self.movq.decode(snake_case__ , force_not_quantize=snake_case__ )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(snake_case__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case__ )
| 673 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
__UpperCamelCase =field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
__UpperCamelCase =Features({"text": Value("string" )} )
__UpperCamelCase =Features({"labels": ClassLabel} )
__UpperCamelCase ="text"
__UpperCamelCase ="labels"
def UpperCamelCase ( self : Dict , snake_case__ : Optional[Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , _a ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE = copy.deepcopy(self )
SCREAMING_SNAKE_CASE = self.label_schema.copy()
SCREAMING_SNAKE_CASE = features[self.label_column]
SCREAMING_SNAKE_CASE = label_schema
return task_template
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 721 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a_ : List[Any] = logging.get_logger("transformers.models.speecht5")
def __lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""]
SCREAMING_SNAKE_CASE = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""]
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_g']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.weight_v']
SCREAMING_SNAKE_CASE = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : List[str]=None , ) -> Tuple:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase )
else:
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.load(_UpperCamelCase )
load_weights(orig_checkpoint['model']['generator'] , _UpperCamelCase , _UpperCamelCase )
SCREAMING_SNAKE_CASE = np.load(_UpperCamelCase )
SCREAMING_SNAKE_CASE = stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE = stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
SCREAMING_SNAKE_CASE = torch.from_numpy(_UpperCamelCase ).float()
model.save_pretrained(_UpperCamelCase )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
a_ : Dict = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 673 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class UpperCamelCase ( unittest.TestCase ):
def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Any=7 , snake_case__ : int=3 , snake_case__ : Any=3_0 , snake_case__ : int=4_0_0 , snake_case__ : int=True , snake_case__ : Optional[int]=None , snake_case__ : List[Any]=True , snake_case__ : int=1 / 2_5_5 , snake_case__ : Tuple=True , snake_case__ : Any=[0.5, 0.5, 0.5] , snake_case__ : str=[0.5, 0.5, 0.5] , snake_case__ : Tuple=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = min_resolution
SCREAMING_SNAKE_CASE = max_resolution
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean
SCREAMING_SNAKE_CASE = image_std
SCREAMING_SNAKE_CASE = do_pad
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self : int , snake_case__ : List[str] , snake_case__ : int=False ):
"""simple docstring"""
if not batched:
SCREAMING_SNAKE_CASE = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.size
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2]
if w < h:
SCREAMING_SNAKE_CASE = int(self.size['shortest_edge'] * h / w )
SCREAMING_SNAKE_CASE = self.size['shortest_edge']
elif w > h:
SCREAMING_SNAKE_CASE = self.size['shortest_edge']
SCREAMING_SNAKE_CASE = int(self.size['shortest_edge'] * w / h )
else:
SCREAMING_SNAKE_CASE = self.size['shortest_edge']
SCREAMING_SNAKE_CASE = self.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE = []
for image in image_inputs:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
SCREAMING_SNAKE_CASE = max(UpperCamelCase__ , key=lambda snake_case__ : item[0] )[0]
SCREAMING_SNAKE_CASE = max(UpperCamelCase__ , key=lambda snake_case__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =DetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = DetrImageProcessingTester(self )
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , 'image_mean' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'image_std' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_normalize' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_rescale' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'rescale_factor' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_resize' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'size' ) )
self.assertTrue(hasattr(UpperCamelCase__ , 'do_pad' ) )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
SCREAMING_SNAKE_CASE = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
SCREAMING_SNAKE_CASE = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
SCREAMING_SNAKE_CASE = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
@slow
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
SCREAMING_SNAKE_CASE = json.loads(f.read() )
SCREAMING_SNAKE_CASE = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
SCREAMING_SNAKE_CASE = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
SCREAMING_SNAKE_CASE = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
SCREAMING_SNAKE_CASE = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors='pt' )
# verify pixel values
SCREAMING_SNAKE_CASE = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
SCREAMING_SNAKE_CASE = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
# verify boxes
SCREAMING_SNAKE_CASE = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
SCREAMING_SNAKE_CASE = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
SCREAMING_SNAKE_CASE = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
# verify is_crowd
SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
# verify class_labels
SCREAMING_SNAKE_CASE = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
# verify masks
SCREAMING_SNAKE_CASE = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase__ )
# verify orig_size
SCREAMING_SNAKE_CASE = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
# verify size
SCREAMING_SNAKE_CASE = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
| 700 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
a_ : List[Any] = {
"allenai/led-base-16384": 1_6384,
}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =LEDTokenizer
__UpperCamelCase =["input_ids", "attention_mask"]
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[str]=None , snake_case__ : Dict="replace" , snake_case__ : Tuple="<s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : int="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : List[str]="<mask>" , snake_case__ : List[Any]=False , snake_case__ : int=True , **snake_case__ : Dict , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = pre_tok_class(**snake_case__ )
SCREAMING_SNAKE_CASE = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE = 'post_processor'
SCREAMING_SNAKE_CASE = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE = tuple(state['sep'] )
if "cls" in state:
SCREAMING_SNAKE_CASE = tuple(state['cls'] )
SCREAMING_SNAKE_CASE = False
if state.get('add_prefix_space' , snake_case__ ) != add_prefix_space:
SCREAMING_SNAKE_CASE = add_prefix_space
SCREAMING_SNAKE_CASE = True
if state.get('trim_offsets' , snake_case__ ) != trim_offsets:
SCREAMING_SNAKE_CASE = trim_offsets
SCREAMING_SNAKE_CASE = True
if changes_to_apply:
SCREAMING_SNAKE_CASE = getattr(snake_case__ , state.pop('type' ) )
SCREAMING_SNAKE_CASE = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase ( self : List[Any] , snake_case__ : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
SCREAMING_SNAKE_CASE = value
def UpperCamelCase ( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : List[str] , *snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = kwargs.get('is_split_into_words' , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def UpperCamelCase ( self : List[str] , snake_case__ : int , snake_case__ : Tuple=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE = len(encoded_inputs['global_attention_mask'] ) != len(snake_case__ )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE = len(snake_case__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 673 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCamelCase :
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : str=2 , snake_case__ : Dict=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[Any]=2 , snake_case__ : Optional[int]=7 , snake_case__ : Optional[int]=True , snake_case__ : str=True , snake_case__ : Dict=True , snake_case__ : int=True , snake_case__ : Optional[int]=9_9 , snake_case__ : str=3_6 , snake_case__ : Any=2 , snake_case__ : int=4 , snake_case__ : str=3_7 , snake_case__ : Any="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Dict=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : Tuple=1_6 , snake_case__ : Any=2 , snake_case__ : List[str]=0.02 , snake_case__ : Union[str, Any]=6 , snake_case__ : str=6 , snake_case__ : Union[str, Any]=3 , snake_case__ : int=4 , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1_0_0_0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_token_type_ids
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = type_sequence_label_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = coordinate_size
SCREAMING_SNAKE_CASE = shape_size
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = num_choices
SCREAMING_SNAKE_CASE = scope
SCREAMING_SNAKE_CASE = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
SCREAMING_SNAKE_CASE = text_seq_length
SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2 + 1
SCREAMING_SNAKE_CASE = self.text_seq_length + self.image_seq_length
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
SCREAMING_SNAKE_CASE = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE = bbox[i, j, 3]
SCREAMING_SNAKE_CASE = bbox[i, j, 1]
SCREAMING_SNAKE_CASE = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE = bbox[i, j, 2]
SCREAMING_SNAKE_CASE = bbox[i, j, 0]
SCREAMING_SNAKE_CASE = tmp_coordinate
SCREAMING_SNAKE_CASE = tf.constant(snake_case__ )
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.text_seq_length] )
SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase ( self : Dict , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel(config=snake_case__ )
# text + image
SCREAMING_SNAKE_CASE = model(snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , training=snake_case__ , )
SCREAMING_SNAKE_CASE = model(snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
SCREAMING_SNAKE_CASE = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
SCREAMING_SNAKE_CASE = model({'pixel_values': pixel_values} , training=snake_case__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase ( self : str , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFLayoutLMvaForSequenceClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = TFLayoutLMvaForTokenClassification(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase ( self : int , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = TFLayoutLMvaForQuestionAnswering(config=snake_case__ )
SCREAMING_SNAKE_CASE = model(
snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , training=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
__UpperCamelCase =(
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase =(
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
def UpperCamelCase ( self : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
return True
def UpperCamelCase ( self : int , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Dict=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = copy.deepcopy(snake_case__ )
if model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE = {
k: tf.tile(tf.expand_dims(snake_case__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(snake_case__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
SCREAMING_SNAKE_CASE = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(snake_case__ ):
SCREAMING_SNAKE_CASE = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFLayoutLMvaModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def UpperCamelCase ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(snake_case__ )
if getattr(snake_case__ , 'hf_compute_loss' , snake_case__ ):
# The number of elements in the loss should be the same as the number of elements in the label
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=snake_case__ )[0]
]
SCREAMING_SNAKE_CASE = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class.pop('input_ids' )
SCREAMING_SNAKE_CASE = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
SCREAMING_SNAKE_CASE = prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
SCREAMING_SNAKE_CASE = -1_0_0
SCREAMING_SNAKE_CASE = tf.convert_to_tensor(snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ , **snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
SCREAMING_SNAKE_CASE = model(snake_case__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
SCREAMING_SNAKE_CASE = self._prepare_for_class(inputs_dict.copy() , snake_case__ , return_labels=snake_case__ )
# Get keys that were added with the _prepare_for_class function
SCREAMING_SNAKE_CASE = prepared_for_class.keys() - inputs_dict.keys()
SCREAMING_SNAKE_CASE = inspect.signature(model.call ).parameters
SCREAMING_SNAKE_CASE = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
SCREAMING_SNAKE_CASE = {0: 'input_ids'}
for label_key in label_keys:
SCREAMING_SNAKE_CASE = signature_names.index(snake_case__ )
SCREAMING_SNAKE_CASE = label_key
SCREAMING_SNAKE_CASE = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
SCREAMING_SNAKE_CASE = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
SCREAMING_SNAKE_CASE = prepared_for_class[value]
SCREAMING_SNAKE_CASE = tuple(snake_case__ )
# Send to model
SCREAMING_SNAKE_CASE = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
@slow
def UpperCamelCase ( self : Any ):
"""simple docstring"""
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def __lowerCAmelCase ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=snake_case__ ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=snake_case__ , return_tensors='tf' ).pixel_values
SCREAMING_SNAKE_CASE = tf.constant([[1, 2]] )
SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
SCREAMING_SNAKE_CASE = model(input_ids=snake_case__ , bbox=snake_case__ , pixel_values=snake_case__ , training=snake_case__ )
# verify the logits
SCREAMING_SNAKE_CASE = (1, 1_9_9, 7_6_8)
self.assertEqual(outputs.last_hidden_state.shape , snake_case__ )
SCREAMING_SNAKE_CASE = tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case__ , atol=1E-4 ) )
| 701 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def __lowerCAmelCase ( *_UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(_UpperCamelCase , 'r' ) as fh:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_EX )
try:
print(*_UpperCamelCase )
finally:
fcntl.flock(_UpperCamelCase , fcntl.LOCK_UN )
a_ : int = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
a_ : str = torch.device("cuda", local_rank)
a_ : Optional[int] = socket.gethostname()
a_ : Union[str, Any] = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
a_ : Dict = dist.get_rank()
a_ : Any = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
a_ : Optional[int] = logging.get_logger(__name__)
a_ : Dict = "▁"
a_ : str = {"vocab_file": "sentencepiece.bpe.model"}
a_ : Optional[Any] = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
a_ : str = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
a_ : Dict = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class UpperCamelCase ( _UpperCAmelCase ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =["input_ids", "attention_mask"]
__UpperCamelCase =[]
__UpperCamelCase =[]
def __init__( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : str="<s>" , snake_case__ : Any="</s>" , snake_case__ : Any="</s>" , snake_case__ : int="<s>" , snake_case__ : Union[str, Any]="<unk>" , snake_case__ : str="<pad>" , snake_case__ : List[Any]="<mask>" , snake_case__ : str=None , snake_case__ : List[str]=None , snake_case__ : Any=None , snake_case__ : Optional[int] = None , snake_case__ : int=None , snake_case__ : str=False , **snake_case__ : Optional[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE = legacy_behaviour
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
SCREAMING_SNAKE_CASE = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = len(self.sp_model )
SCREAMING_SNAKE_CASE = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
SCREAMING_SNAKE_CASE = {v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
SCREAMING_SNAKE_CASE = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
SCREAMING_SNAKE_CASE = src_lang if src_lang is not None else "eng_Latn"
SCREAMING_SNAKE_CASE = self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[str] , snake_case__ : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self : Tuple , snake_case__ : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Tuple = None , snake_case__ : Optional[Any] = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def UpperCamelCase ( self : Any , snake_case__ : List[str] , snake_case__ : Tuple = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self : int , snake_case__ : List[Any] , snake_case__ : Optional[int] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , **snake_case__ : int ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(lowercase__ )
SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : int ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self : List[str] , snake_case__ : str ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = "".join(lowercase__ ).replace(lowercase__ , ' ' ).strip()
return out_string
def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : List[Any] = None ):
"""simple docstring"""
if not os.path.isdir(lowercase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE = os.path.join(
lowercase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def UpperCamelCase ( self : Any , snake_case__ : List[Any] , snake_case__ : Optional[int] = "eng_Latn" , snake_case__ : Dict = None , snake_case__ : Union[str, Any] = "fra_Latn" , **snake_case__ : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = src_lang
SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase ( self : Any ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase ( self : List[Any] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE = [self.cur_lang_code]
SCREAMING_SNAKE_CASE = [self.eos_token_id]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.lang_code_to_id[lang]
if self.legacy_behaviour:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = [self.eos_token_id, self.cur_lang_code]
else:
SCREAMING_SNAKE_CASE = [self.cur_lang_code]
SCREAMING_SNAKE_CASE = [self.eos_token_id]
| 702 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
a_ : Any = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
a_ : Union[str, Any] = {"allegro/herbert-base-cased": 514}
a_ : List[Any] = {}
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
__UpperCamelCase =VOCAB_FILES_NAMES
__UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase =PRETRAINED_INIT_CONFIGURATION
__UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase =HerbertTokenizer
def __init__( self : Tuple , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]=None , snake_case__ : str="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : List[str]="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Dict="</s>" , **snake_case__ : List[str] , ):
"""simple docstring"""
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , sep_token=snake_case__ , **snake_case__ , )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.cls_token_id]
SCREAMING_SNAKE_CASE = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1] + ([0] * len(snake_case__ )) + [1]
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
| 673 | 0 |
def __lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
SCREAMING_SNAKE_CASE = True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
SCREAMING_SNAKE_CASE = False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
SCREAMING_SNAKE_CASE = subset[i - 1][j]
if arr[i - 1] <= j:
SCREAMING_SNAKE_CASE = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod() | 703 |
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
SCREAMING_SNAKE_CASE = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = abs(_UpperCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowerCAmelCase ( _UpperCamelCase : int ) -> int:
'''simple docstring'''
return sum(int(_UpperCamelCase ) for c in str(abs(_UpperCamelCase ) ) )
def __lowerCAmelCase ( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None:
SCREAMING_SNAKE_CASE = f"""{func.__name__}({value})"""
SCREAMING_SNAKE_CASE = timeit(f"""__main__.{call}""" , setup='import __main__' )
print(f"""{call:56} = {func(_UpperCamelCase )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_UpperCamelCase , _UpperCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 673 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCamelCase :
@staticmethod
def UpperCamelCase ( *snake_case__ : List[str] , **snake_case__ : Tuple ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@require_torch
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE = image_classifier(snake_case__ , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(snake_case__ ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
] , )
@require_tf
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE = image_classifier(snake_case__ , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(snake_case__ ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
[
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
{'score': 0.333, 'label': ANY(snake_case__ )},
],
] , )
@slow
@require_torch
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE = image_classifier(snake_case__ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
SCREAMING_SNAKE_CASE = image_classifier(snake_case__ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(snake_case__ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
SCREAMING_SNAKE_CASE = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(snake_case__ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
| 704 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 673 | 0 |
from torch import nn
def __lowerCAmelCase ( _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"""Unsupported activation function: {act_fn}""" )
| 705 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
__UpperCamelCase =AudioLDMPipeline
__UpperCamelCase =TEXT_TO_AUDIO_PARAMS
__UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS
__UpperCamelCase =frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(3_2, 6_4) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=snake_case__ , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
SCREAMING_SNAKE_CASE = ClapTextModelWithProjection(snake_case__ )
SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=7_7 )
SCREAMING_SNAKE_CASE = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=snake_case__ , )
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
def UpperCamelCase ( self : Optional[int] , snake_case__ : int , snake_case__ : int=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(snake_case__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
SCREAMING_SNAKE_CASE = prompt_embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * ['this is a negative prompt']
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = 3 * [inputs['prompt']]
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 3 * [inputs.pop('prompt' )]
SCREAMING_SNAKE_CASE = []
for p in [prompt, negative_prompt]:
SCREAMING_SNAKE_CASE = audioldm_pipe.tokenizer(
snake_case__ , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=snake_case__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE = text_inputs['input_ids'].to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.text_encoder(
snake_case__ , )
SCREAMING_SNAKE_CASE = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
SCREAMING_SNAKE_CASE = F.normalize(snake_case__ , dim=-1 )
embeds.append(snake_case__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = embeds
# forward
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 'egg cracking'
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ , negative_prompt=snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 2_5_6
SCREAMING_SNAKE_CASE = audio[:1_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=snake_case__ )
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=snake_case__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config.sampling_rate
SCREAMING_SNAKE_CASE = self.get_dummy_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.016 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.016
SCREAMING_SNAKE_CASE = audioldm_pipe(audio_length_in_s=0.032 , **snake_case__ )
SCREAMING_SNAKE_CASE = output.audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) / vocoder_sampling_rate == 0.032
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.get_dummy_components()
SCREAMING_SNAKE_CASE = AudioLDMPipeline(**snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = ['hey']
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
assert audio_shape == (1, 2_5_6)
SCREAMING_SNAKE_CASE = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
SCREAMING_SNAKE_CASE = SpeechTaHifiGan(snake_case__ ).to(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(snake_case__ , num_inference_steps=1 )
SCREAMING_SNAKE_CASE = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case__ )
def UpperCamelCase ( self : int ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=snake_case__ )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case__ )
@slow
class UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase ( self : Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self : int , snake_case__ : int , snake_case__ : Tuple="cpu" , snake_case__ : List[str]=torch.floataa , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
SCREAMING_SNAKE_CASE = np.random.RandomState(snake_case__ ).standard_normal((1, 8, 1_2_8, 1_6) )
SCREAMING_SNAKE_CASE = torch.from_numpy(snake_case__ ).to(device=snake_case__ , dtype=snake_case__ )
SCREAMING_SNAKE_CASE = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = 2_5
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[7_7_2_3_0:7_7_2_4_0]
SCREAMING_SNAKE_CASE = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = AudioLDMPipeline.from_pretrained('cvssp/audioldm' )
SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
SCREAMING_SNAKE_CASE = audioldm_pipe.to(snake_case__ )
audioldm_pipe.set_progress_bar_config(disable=snake_case__ )
SCREAMING_SNAKE_CASE = self.get_inputs(snake_case__ )
SCREAMING_SNAKE_CASE = audioldm_pipe(**snake_case__ ).audios[0]
assert audio.ndim == 1
assert len(snake_case__ ) == 8_1_9_2_0
SCREAMING_SNAKE_CASE = audio[2_7_7_8_0:2_7_7_9_0]
SCREAMING_SNAKE_CASE = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
SCREAMING_SNAKE_CASE = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 673 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.