| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 or 1) |
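For orientation, here is a minimal sketch of how rows with this schema could be inspected with the `datasets` library. The dataset identifier below is a placeholder (the real hub id is not recoverable from this dump), so treat this as an assumption-laden illustration rather than a verified loading recipe.

```python
from datasets import load_dataset

# Hypothetical dataset id -- substitute the real hub id.
ds = load_dataset("org/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # first code sample (string)
print(row["code_codestyle"])           # style id of `code` (0..371)
print(row["style_context"][:200])      # second code sample used as context
print(row["style_context_codestyle"])  # style id of `style_context` (0..349)
print(row["label"])                    # 1 if the two samples share a style, else 0
```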
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
| code_codestyle: 35 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 41 | label: 0 |
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()

| code_codestyle: 360 |
import copy
import inspect
import unittest

from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor


if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin


class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

| style_context_codestyle: 306 | label: 0 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
lowerCAmelCase_ :int = precision
lowerCAmelCase_ :Optional[int] = ceil(precision / 1_4 )
lowerCAmelCase_ :List[str] = 4_2_6_8_8_0 * Decimal(1_0_0_0_5 ).sqrt()
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ :int = 1_3_5_9_1_4_0_9
lowerCAmelCase_ :str = Decimal(lowercase__ )
for k in range(1 , lowercase__ ):
lowerCAmelCase_ :Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3)
linear_term += 5_4_5_1_4_0_1_3_4
exponential_term *= -2_6_2_5_3_7_4_1_2_6_4_0_7_6_8_0_0_0
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCAmelCase = 50
print(F"""The first {n} digits of pi is: {pi(n)}""")
| code_codestyle: 84 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : str = ["""NllbTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""NllbTokenizerFast"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 81 | 0 |
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
| code_codestyle: 370 |
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style uppercase column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
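To make the base-26 conversion above concrete (this worked example is added for illustration): scanning from the rightmost character, each letter contributes its alphabet position times a power of 26.

$$\text{AB} \mapsto 2\cdot 26^{0} + 1\cdot 26^{1} = 28,
\qquad \text{ZY} \mapsto 25\cdot 26^{0} + 26\cdot 26^{1} = 701.$$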
| style_context_codestyle: 264 | label: 0 |
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNet2DModel


api = HfApi()

results = {}
# fmt: off
# NOTE: the dict keys on the assignments below were lost when this file was
# extracted. In the original script each expected output slice is stored under
# the model id with "/" and "-" replaced by "_" (that is what the lookup at the
# bottom of the file reconstructs); the "<model_id_N>" keys here are
# placeholders, not the real ids.
results["<model_id_1>"] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
results["<model_id_2>"] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
results["<model_id_3>"] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
results["<model_id_4>"] = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
results["<model_id_5>"] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
results["<model_id_6>"] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
results["<model_id_7>"] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
results["<model_id_8>"] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
results["<model_id_9>"] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
results["<model_id_10>"] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
results["<model_id_11>"] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
results["<model_id_12>"] = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
results["<model_id_13>"] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
results["<model_id_14>"] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
results["<model_id_15>"] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| code_codestyle: 0 |
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| style_context_codestyle: 330 | label: 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[str]:
_a = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
_a = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_a = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
_a = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
_a = tempfile.mkdtemp()
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_a = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
# load decoder from hub
_a = '''hf-internal-testing/ngram-beam-search-decoder'''
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> List[str]:
_a = self.add_kwargs_tokens_map.copy()
kwargs.update(__UpperCAmelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Optional[int]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def _UpperCAmelCase ( self , **__UpperCAmelCase ) -> Optional[int]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.get_tokenizer()
_a = self.get_feature_extractor()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCAmelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[int]:
_a = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_a = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _UpperCAmelCase ( self ) -> Any:
_a = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(__UpperCAmelCase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=__UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _UpperCAmelCase ( self ) -> Tuple:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = floats_list((3, 1000) )
_a = feature_extractor(__UpperCAmelCase , return_tensors='''np''' )
_a = processor(__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _UpperCAmelCase ( self ) -> int:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = '''This is a test string'''
_a = processor(text=__UpperCAmelCase )
_a = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _UpperCAmelCase ( self , __UpperCAmelCase=(2, 10, 16) , __UpperCAmelCase=77 ) -> List[Any]:
np.random.seed(__UpperCAmelCase )
return np.random.rand(*__UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_a = processor.decode(__UpperCAmelCase )
_a = decoder.decode_beams(__UpperCAmelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_a = processor.batch_decode(__UpperCAmelCase )
else:
with get_context(__UpperCAmelCase ).Pool() as pool:
_a = processor.batch_decode(__UpperCAmelCase , __UpperCAmelCase )
_a = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as p:
_a = decoder.decode_beams_batch(__UpperCAmelCase , __UpperCAmelCase )
_a , _a , _a = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__UpperCAmelCase , decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
self.assertListEqual(__UpperCAmelCase , decoded_processor.logit_score )
self.assertListEqual(__UpperCAmelCase , decoded_processor.lm_score )
def _UpperCAmelCase ( self ) -> str:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = self._get_dummy_logits()
_a = 15
_a = -20.0
_a = -4.0
_a = processor.batch_decode(
__UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
_a = decoded_processor_out.text
_a = list(__UpperCAmelCase )
with get_context('''fork''' ).Pool() as pool:
_a = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , beam_width=__UpperCAmelCase , beam_prune_logp=__UpperCAmelCase , token_min_logp=__UpperCAmelCase , )
_a = [d[0][0] for d in decoded_decoder_out]
_a = [d[0][2] for d in decoded_decoder_out]
_a = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __UpperCAmelCase )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __UpperCAmelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__UpperCAmelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , __UpperCAmelCase , atol=1e-3 ) )
def _UpperCAmelCase ( self ) -> Dict:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
_a = self._get_dummy_logits()
_a = 2.0
_a = 5.0
_a = -20.0
_a = True
_a = processor.batch_decode(
__UpperCAmelCase , alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
_a = decoded_processor_out.text
_a = list(__UpperCAmelCase )
decoder.reset_params(
alpha=__UpperCAmelCase , beta=__UpperCAmelCase , unk_score_offset=__UpperCAmelCase , lm_score_boundary=__UpperCAmelCase , )
with get_context('''fork''' ).Pool() as pool:
_a = decoder.decode_beams_batch(
__UpperCAmelCase , __UpperCAmelCase , )
_a = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __UpperCAmelCase )
_a = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[str]:
_a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_a = processor.decoder.model_container[processor.decoder._model_key]
_a = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_a = os.listdir(__UpperCAmelCase )
_a = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_a = WavaVecaProcessorWithLM.from_pretrained(__UpperCAmelCase )
_a = processor.decoder.model_container[processor.decoder._model_key]
_a = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_a = os.listdir(__UpperCAmelCase )
_a = os.listdir(__UpperCAmelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
_a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_a = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_a = floats_list((3, 1000) )
_a = processor_wavaveca(__UpperCAmelCase , return_tensors='''np''' )
_a = processor_auto(__UpperCAmelCase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
_a = self._get_dummy_logits()
_a = processor_wavaveca.batch_decode(__UpperCAmelCase )
_a = processor_auto.batch_decode(__UpperCAmelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_feature_extractor()
_a = self.get_tokenizer()
_a = self.get_decoder()
_a = WavaVecaProcessorWithLM(tokenizer=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , decoder=__UpperCAmelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def _UpperCAmelCase ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = [d[key] for d in offsets]
return retrieved_list
def _UpperCAmelCase ( self ) -> int:
_a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_a = self._get_dummy_logits()[0]
_a = processor.decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def _UpperCAmelCase ( self ) -> List[str]:
_a = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_a = self._get_dummy_logits()
_a = processor.batch_decode(__UpperCAmelCase , output_word_offsets=__UpperCAmelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(__UpperCAmelCase , __UpperCAmelCase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _UpperCAmelCase ( self ) -> str:
import torch
_a = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__UpperCAmelCase )
_a = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
_a = iter(__UpperCAmelCase )
_a = next(__UpperCAmelCase )
_a = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_a = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_a = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_a = model(__UpperCAmelCase ).logits.cpu().numpy()
_a = processor.decode(logits[0] , output_word_offsets=__UpperCAmelCase )
_a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_a = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_a = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , __UpperCAmelCase )
self.assertEqual(''' '''.join(self.get_from_offsets(__UpperCAmelCase , '''word''' ) ) , output.text )
# output times
_a = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''start_time''' ) )
_a = torch.tensor(self.get_from_offsets(__UpperCAmelCase , '''end_time''' ) )
# fmt: off
_a = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_a = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=0.01 ) ) | 153 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _UpperCAmelCase ( self ) -> str:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCAmelCase , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 153 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = 'convbert'
def __init__(self , A=30_522 , A=768 , A=12 , A=12 , A=3_072 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.02 , A=1E-12 , A=1 , A=0 , A=2 , A=768 , A=2 , A=9 , A=1 , A=None , **A , ) -> Any:
"""simple docstring"""
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , **A , )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = embedding_size
_a = head_ratio
_a = conv_kernel_size
_a = num_groups
_a = classifier_dropout
class __A ( A ):
'''simple docstring'''
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
_a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
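# Aside: what the `inputs` property above yields for the default (non
# multiple-choice) task -- batch and sequence are the dynamic ONNX axes for
# each tensor. Pure illustration, runnable on its own.
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
expected_onnx_inputs = OrderedDict(
    [
        ("input_ids", dynamic_axis),
        ("attention_mask", dynamic_axis),
        ("token_type_ids", dynamic_axis),
    ]
)
print(expected_onnx_inputs)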
| 211 |
'''simple docstring'''
import math
class __A :
'''simple docstring'''
def __init__(self , A=0 ) -> Dict: # a graph with Node 0,1,...,N-1
"""simple docstring"""
_a = n
_a = [
[math.inf for j in range(0 , A )] for i in range(0 , A )
] # adjacency matrix for weight
_a = [
[math.inf for j in range(0 , A )] for i in range(0 , A )
] # dp[i][j] stores minimum distance from i to j
def a__ (self , A , A , A ) -> Tuple:
"""simple docstring"""
_a = w
def a__ (self ) -> List[Any]:
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
_a = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def a__ (self , A , A ) -> str:
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
lowercase_ = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
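import math

# Aside: a compact, correct restatement of the all-pairs shortest-path logic
# above. The obfuscated class loses the `dp[u][v] = w` edge assignment and the
# zero diagonal of dp; this sketch restores both.
def floyd_warshall(n, edges):
    dp = [[math.inf] * n for _ in range(n)]
    for i in range(n):
        dp[i][i] = 0  # a node reaches itself at cost 0
    for u, v, w in edges:
        dp[u][v] = w
    for k in range(n):
        for i in range(n):
            for j in range(n):
                dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
    return dp

dist = floyd_warshall(5, [(0, 2, 9), (0, 4, 10), (1, 3, 5), (3, 4, 6), (4, 1, 3)])
print(dist[1][4])  # 11: 1 -> 3 -> 4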
| 211 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class _a ( UpperCamelCase__ ):
_lowercase : Optional[Any] = '''Wav2Vec2FeatureExtractor'''
_lowercase : List[Any] = '''AutoTokenizer'''
def __init__( self: Union[str, Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: List[str] ) -> Tuple:
"""simple docstring"""
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.feature_extractor
lowercase__ = False
@classmethod
def lowerCamelCase_ ( cls: int , UpperCamelCase_: Union[str, Any] , **UpperCamelCase_: Optional[int] ) -> Optional[int]:
"""simple docstring"""
try:
return super().from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
except OSError:
warnings.warn(
f'Loading a tokenizer inside {cls.__name__} from a config that does not'
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , UpperCamelCase_ , )
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = WavaVecaCTCTokenizer.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
return cls(feature_extractor=UpperCamelCase_ , tokenizer=UpperCamelCase_ )
def __call__( self: Optional[int] , *UpperCamelCase_: Dict , **UpperCamelCase_: Dict ) -> List[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
lowercase__ = kwargs.pop('''raw_speech''' )
else:
lowercase__ = kwargs.pop('''audio''' , UpperCamelCase_ )
lowercase__ = kwargs.pop('''sampling_rate''' , UpperCamelCase_ )
lowercase__ = kwargs.pop('''text''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowercase__ = args[0]
lowercase__ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
lowercase__ = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
if text is not None:
lowercase__ = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ = encodings['''input_ids''']
return inputs
def lowerCamelCase_ ( self: List[Any] , *UpperCamelCase_: str , **UpperCamelCase_: Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = kwargs.pop('''input_features''' , UpperCamelCase_ )
lowercase__ = kwargs.pop('''labels''' , UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
lowercase__ = args[0]
lowercase__ = args[1:]
if input_features is not None:
lowercase__ = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
if labels is not None:
lowercase__ = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase__ = labels['''input_ids''']
return input_features
def lowerCamelCase_ ( self: List[str] , *UpperCamelCase_: Optional[Any] , **UpperCamelCase_: Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase_ ( self: List[Any] , *UpperCamelCase_: List[Any] , **UpperCamelCase_: Dict ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@contextmanager
def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
lowercase__ = True
lowercase__ = self.tokenizer
yield
lowercase__ = self.feature_extractor
lowercase__ = False
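# Aside: a hedged usage sketch for the processor above. The checkpoint name is
# illustrative; any Wav2Vec2 CTC checkpoint that bundles a tokenizer works the
# same way.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="pt")
labels = processor(text="HELLO WORLD").input_ids  # targets go through `text=`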
| 93 |
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = len(SCREAMING_SNAKE_CASE )
lowercase__ = []
for i in range(len(SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowercase__ = True
for j in range(SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowercase__ = False
break
if match_found:
position.append(SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
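# Aside: the same naive O(len(s) * len(pattern)) scan written with slicing,
# for comparison with the index-by-index version above.
def naive_pattern_search_slices(s, pattern):
    m = len(pattern)
    return [i for i in range(len(s) - m + 1) if s[i : i + m] == pattern]

assert naive_pattern_search_slices("ABCDEFG", "DE") == [3]
assert naive_pattern_search_slices("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]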
| 93 | 1 |
from math import log2 as loga  # `math` has no `loga`; alias log2 so the snippet runs
def UpperCamelCase_( lowerCamelCase_ ) -> int:
    if lowerCamelCase_ < 0:
        raise ValueError('Input value must be a positive integer' )
    elif isinstance(lowerCamelCase_ , float ):
        raise TypeError('Input value must be a \'int\' type' )
    return 0 if (lowerCamelCase_ == 0) else int(loga(lowerCamelCase_ & -lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
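# Aside: why `a & -a` isolates the lowest set bit: in two's complement, -a
# flips every bit above the lowest 1 and keeps that 1 in place, so the AND
# leaves exactly that bit.
for value in (1, 4, 6, 36):
    lowest = value & -value
    print(value, "->", lowest, "bit index", lowest.bit_length() - 1)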
| 21 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : str = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Optional[Any] = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
SCREAMING_SNAKE_CASE : List[Any] = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
SCREAMING_SNAKE_CASE : Dict = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCamelCase( _a ):
lowercase_ : Any = VOCAB_FILES_NAMES
lowercase_ : Optional[int] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : str = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _lowerCamelCase( _a ):
lowercase_ : Optional[int] = VOCAB_FILES_NAMES
lowercase_ : Any = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : str = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
SCREAMING_SNAKE_CASE : Any = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
SCREAMING_SNAKE_CASE : str = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_a )
class _lowerCamelCase:
def __call__( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = False, lowerCamelCase = False, lowerCamelCase = None, lowerCamelCase = None, lowerCamelCase = None, **lowerCamelCase, ) -> BatchEncoding:
"""simple docstring"""
if titles is None and texts is None:
return super().__call__(
lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
elif titles is None or texts is None:
_lowercase : Dict = titles if texts is None else texts
return super().__call__(
lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase, return_attention_mask=lowerCamelCase, **lowerCamelCase, )
_lowercase : Union[str, Any] = titles if not isinstance(lowerCamelCase, lowerCamelCase) else [titles]
_lowercase : Tuple = texts if not isinstance(lowerCamelCase, lowerCamelCase) else [texts]
_lowercase : Optional[Any] = len(lowerCamelCase)
_lowercase : Any = questions if not isinstance(lowerCamelCase, lowerCamelCase) else [questions] * n_passages
if len(lowerCamelCase) != len(lowerCamelCase):
raise ValueError(
F'''There should be as many titles than texts but got {len(lowerCamelCase)} titles and {len(lowerCamelCase)} texts.''')
_lowercase : Any = super().__call__(lowerCamelCase, lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : Tuple = super().__call__(lowerCamelCase, add_special_tokens=lowerCamelCase, padding=lowerCamelCase, truncation=lowerCamelCase)['input_ids']
_lowercase : int = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCamelCase, lowerCamelCase)
]
}
if return_attention_mask is not False:
_lowercase : Optional[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
_lowercase : Union[str, Any] = attention_mask
return self.pad(lowerCamelCase, padding=lowerCamelCase, max_length=lowerCamelCase, return_tensors=lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase = 16, lowerCamelCase = 64, lowerCamelCase = 4, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : Union[str, Any] = reader_input['input_ids']
_lowercase , _lowercase , _lowercase : Tuple = reader_output[:3]
_lowercase : Tuple = len(lowerCamelCase)
_lowercase : str = sorted(range(lowerCamelCase), reverse=lowerCamelCase, key=relevance_logits.__getitem__)
_lowercase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowercase : str = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
_lowercase : Any = sequence_ids.index(self.sep_token_id, 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowercase : List[Any] = sequence_ids.index(self.pad_token_id)
else:
_lowercase : List[str] = len(lowerCamelCase)
_lowercase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=lowerCamelCase, top_spans=lowerCamelCase, )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=lowerCamelCase, start_index=lowerCamelCase, end_index=lowerCamelCase, text=self.decode(sequence_ids[start_index : end_index + 1]), ))
if len(lowerCamelCase) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[DPRSpanPrediction]:
"""simple docstring"""
_lowercase : str = []
for start_index, start_score in enumerate(lowerCamelCase):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
_lowercase : Dict = sorted(lowerCamelCase, key=lambda lowerCamelCase: x[1], reverse=lowerCamelCase)
_lowercase : List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''')
_lowercase : Dict = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F'''Span is too long: {length} > {max_answer_length}''')
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCamelCase) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class _lowerCamelCase( _a, _a ):
lowercase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowercase_ : Any = READER_PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Dict = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
lowercase_ : str = ["""input_ids""", """attention_mask"""]
| 21 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__snake_case : Union[str, Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = ['input_features', 'attention_mask']
def __init__( self : List[Any] , lowerCAmelCase_ : Union[str, Any]=80 , lowerCAmelCase_ : int=1_60_00 , lowerCAmelCase_ : Union[str, Any]=80 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=True , **lowerCAmelCase_ : Tuple , ) -> Optional[int]:
'''simple docstring'''
super().__init__(feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , **lowerCAmelCase_ )
A__ : Dict =num_mel_bins
A__ : Optional[int] =do_ceptral_normalize
A__ : Union[str, Any] =normalize_means
A__ : Union[str, Any] =normalize_vars
A__ : Any =True
def lowercase__ ( self : List[str] , lowerCAmelCase_ : np.ndarray , ) -> np.ndarray:
'''simple docstring'''
A__ : int =waveform * (2**15) # Kaldi compliance: 16-bit signed integers
A__ : Optional[int] =torch.from_numpy(lowerCAmelCase_ ).unsqueeze(0 )
A__ : List[str] =ta_kaldi.fbank(lowerCAmelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowercase__ ( lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[bool] = True , lowerCAmelCase_ : Optional[bool] = True , lowerCAmelCase_ : float = 0.0 , ) -> np.ndarray:
'''simple docstring'''
# make sure we normalize float32 arrays
if normalize_means:
A__ : List[Any] =x[:input_length].mean(axis=0 )
A__ : int =np.subtract(lowerCAmelCase_ , lowerCAmelCase_ )
if normalize_vars:
A__ : Union[str, Any] =x[:input_length].std(axis=0 )
A__ : str =np.divide(lowerCAmelCase_ , lowerCAmelCase_ )
if input_length < x.shape[0]:
A__ : Dict =padding_value
# make sure array is in float32
A__ : List[str] =x.astype(np.floataa )
return x
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : List[np.ndarray] , lowerCAmelCase_ : Optional[np.ndarray] = None ) -> List[np.ndarray]:
'''simple docstring'''
A__ : Union[str, Any] =attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(lowerCAmelCase_ , lowerCAmelCase_ , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(lowerCAmelCase_ , lowerCAmelCase_ )
]
def __call__( self : str , lowerCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , **lowerCAmelCase_ : int , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A__ : Dict =isinstance(lowerCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A__ : Union[str, Any] =is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ : Tuple =[np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray ):
A__ : int =np.asarray(lowerCAmelCase_ , dtype=np.floataa )
elif isinstance(lowerCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ : int =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ : int =[raw_speech]
# extract fbank features
A__ : int =[self._extract_fbank_features(lowerCAmelCase_ ) for waveform in raw_speech]
# convert into correct format for padding
A__ : List[str] =BatchFeature({"""input_features""": features} )
A__ : int =self.pad(
lowerCAmelCase_ , padding=lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
# make sure list is in array format
A__ : Any =padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , lowerCAmelCase_ ):
A__ : List[str] =[np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for feature in input_features]
A__ : Dict =padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
A__ : List[str] =[np.asarray(lowerCAmelCase_ , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
A__ : Any =(
np.array(lowerCAmelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCAmelCase_ , max_length=lowerCAmelCase_ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
A__ : List[str] =self.normalize(
padded_inputs["""input_features"""] , attention_mask=lowerCAmelCase_ )
if return_tensors is not None:
A__ : Union[str, Any] =padded_inputs.convert_to_tensors(lowerCAmelCase_ )
return padded_inputs
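import numpy as np

# Aside: the utterance-level CMVN step from `utterance_cmvn` above, reduced to
# its NumPy core for a single unpadded utterance. The small epsilon is an added
# safeguard against zero variance, not part of the original.
def cmvn(features):
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return ((features - mean) / (std + 1e-10)).astype(np.float32)

print(cmvn(np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])).round(3))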
| 368 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[Any] =ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""" )
A__ : List[Any] =parser.add_subparsers(help="""transformers-cli command helpers""" )
# Register commands
ConvertCommand.register_subcommand(__snake_case )
DownloadCommand.register_subcommand(__snake_case )
EnvironmentCommand.register_subcommand(__snake_case )
RunCommand.register_subcommand(__snake_case )
ServeCommand.register_subcommand(__snake_case )
UserCommands.register_subcommand(__snake_case )
AddNewModelCommand.register_subcommand(__snake_case )
AddNewModelLikeCommand.register_subcommand(__snake_case )
LfsCommands.register_subcommand(__snake_case )
PTtoTFCommand.register_subcommand(__snake_case )
# Let's go
A__ : List[str] =parser.parse_args()
if not hasattr(__snake_case, """func""" ):
parser.print_help()
exit(1 )
# Run
A__ : Optional[Any] =args.func(__snake_case )
service.run()
if __name__ == "__main__":
main()
| 136 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
__lowercase : Dict = XLMTokenizer
__lowercase : Union[str, Any] = False
def snake_case_ ( self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__SCREAMING_SNAKE_CASE = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__SCREAMING_SNAKE_CASE = dict(zip(snake_case__ , range(len(snake_case__))))
__SCREAMING_SNAKE_CASE = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
with open(self.vocab_file , """w""") as fp:
fp.write(json.dumps(snake_case__))
with open(self.merges_file , """w""") as fp:
fp.write("""\n""".join(snake_case__))
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = """lower newer"""
return input_text, output_text
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLMTokenizer(self.vocab_file , self.merges_file)
__SCREAMING_SNAKE_CASE = """lower"""
__SCREAMING_SNAKE_CASE = ["""low""", """er</w>"""]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(snake_case__)
self.assertListEqual(snake_case__ , snake_case__)
__SCREAMING_SNAKE_CASE = tokens + ["""<unk>"""]
__SCREAMING_SNAKE_CASE = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__) , snake_case__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XLMTokenizer.from_pretrained("""xlm-mlm-en-2048""")
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case__)
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__)
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
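# Aside: how the toy merge table above splits "lower" into ["low", "er</w>"].
# BPE starts from characters (</w> marks the word end) and greedily applies
# merges in priority order; nothing ever joins "low" with "er</w>".
word = ["l", "o", "w", "e", "r</w>"]
merges = [("l", "o"), ("lo", "w"), ("e", "r</w>")]
for a, b in merges:
    i = 0
    while i < len(word) - 1:
        if (word[i], word[i + 1]) == (a, b):
            word[i : i + 2] = [a + b]
        else:
            i += 1
print(word)  # ['low', 'er</w>']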
| 100 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=True , snake_case_="pt" ):
'''simple docstring'''
_UpperCAmelCase = {"add_prefix_space": True} if isinstance(snake_case_ , snake_case_ ) and not line.startswith(" " ) else {}
_UpperCAmelCase = padding_side
return tokenizer(
[line] , max_length=snake_case_ , padding="max_length" if pad_to_max_length else None , truncation=snake_case_ , return_tensors=snake_case_ , add_special_tokens=snake_case_ , **snake_case_ , )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , ):
'''simple docstring'''
_UpperCAmelCase = input_ids.ne(snake_case_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( UpperCAmelCase__ ):
def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str]="train" , snake_case__ : List[str]=None , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : List[str]="" , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".source" )
_UpperCAmelCase = Path(snake_case__ ).joinpath(type_path + ".target" )
_UpperCAmelCase = self.get_char_lens(self.src_file )
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self.src_lens )
def __getitem__( self : Optional[Any] , snake_case__ : str ):
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip("\n" )
_UpperCAmelCase = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip("\n" )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer , snake_case__ ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_source_length , "right" )
_UpperCAmelCase = encode_line(snake_case__ , snake_case__ , self.max_target_length , "right" )
_UpperCAmelCase = source_inputs["input_ids"].squeeze()
_UpperCAmelCase = target_inputs["input_ids"].squeeze()
_UpperCAmelCase = source_inputs["attention_mask"].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def UpperCamelCase ( snake_case__ : Optional[Any] ):
"""simple docstring"""
return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()]
def UpperCamelCase ( self : Any , snake_case__ : List[Any] ):
"""simple docstring"""
_UpperCAmelCase = torch.stack([x["input_ids"] for x in batch] )
_UpperCAmelCase = torch.stack([x["attention_mask"] for x in batch] )
_UpperCAmelCase = torch.stack([x["decoder_input_ids"] for x in batch] )
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , snake_case__ )
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(snake_case__ , snake_case__ )
_UpperCAmelCase , _UpperCAmelCase = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ )
_UpperCAmelCase = {
"input_ids": source_ids,
"attention_mask": source_mask,
"decoder_input_ids": y,
}
return batch
lowercase_ : Dict = getLogger(__name__)
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
return list(itertools.chain.from_iterable(snake_case_ ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = get_git_info()
save_json(snake_case_ , os.path.join(snake_case_ , "git_log.json" ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=4 , **snake_case_ ):
'''simple docstring'''
with open(snake_case_ , "w" ) as f:
json.dump(snake_case_ , snake_case_ , indent=snake_case_ , **snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
with open(snake_case_ ) as f:
return json.load(snake_case_ )
def __SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
_UpperCAmelCase = git.Repo(search_parent_directories=snake_case_ )
_UpperCAmelCase = {
"repo_id": str(snake_case_ ),
"repo_sha": str(repo.head.object.hexsha ),
"repo_branch": str(repo.active_branch ),
"hostname": str(socket.gethostname() ),
}
return repo_infos
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
return list(map(snake_case_ , snake_case_ ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
with open(snake_case_ , "wb" ) as f:
return pickle.dump(snake_case_ , snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
def remove_articles(snake_case_ ):
return re.sub(R"\b(a|an|the)\b" , " " , snake_case_ )
def white_space_fix(snake_case_ ):
return " ".join(text.split() )
def remove_punc(snake_case_ ):
_UpperCAmelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(snake_case_ ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case_ ) ) ) )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = normalize_answer(snake_case_ ).split()
_UpperCAmelCase = normalize_answer(snake_case_ ).split()
_UpperCAmelCase = Counter(snake_case_ ) & Counter(snake_case_ )
_UpperCAmelCase = sum(common.values() )
if num_same == 0:
return 0
_UpperCAmelCase = 1.0 * num_same / len(snake_case_ )
_UpperCAmelCase = 1.0 * num_same / len(snake_case_ )
_UpperCAmelCase = (2 * precision * recall) / (precision + recall)
return fa
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
return normalize_answer(snake_case_ ) == normalize_answer(snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ):
'''simple docstring'''
assert len(snake_case_ ) == len(snake_case_ )
_UpperCAmelCase = 0
for hypo, pred in zip(snake_case_ , snake_case_ ):
em += exact_match_score(snake_case_ , snake_case_ )
if len(snake_case_ ) > 0:
em /= len(snake_case_ )
return {"em": em}
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
return model_prefix.startswith("rag" )
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_UpperCAmelCase = "dropout_rate"
for p in extra_params:
if getattr(snake_case_ , snake_case_ , snake_case_ ):
if not hasattr(snake_case_ , snake_case_ ) and not hasattr(snake_case_ , equivalent_param[p] ):
logger.info("config doesn't have a `{}` attribute".format(snake_case_ ) )
delattr(snake_case_ , snake_case_ )
continue
_UpperCAmelCase = p if hasattr(snake_case_ , snake_case_ ) else equivalent_param[p]
setattr(snake_case_ , snake_case_ , getattr(snake_case_ , snake_case_ ) )
delattr(snake_case_ , snake_case_ )
return hparams, config
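from collections import Counter

# Aside: the SQuAD-style token F1 computed above, isolated. Precision and
# recall come from the multiset intersection of tokens; the normalization step
# (lowercasing, stripping punctuation and articles) is omitted here for brevity.
def token_f1(prediction, gold):
    pred_tokens, gold_tokens = prediction.split(), gold.split()
    num_same = sum((Counter(pred_tokens) & Counter(gold_tokens)).values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)

print(token_f1("a cat sat", "the cat sat"))  # ~0.667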
| 133 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class A__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ComputeEnvironment.AMAZON_SAGEMAKER
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = 'ml.p3.2xlarge'
SCREAMING_SNAKE_CASE = 'accelerate_sagemaker_execution_role'
SCREAMING_SNAKE_CASE = 'hf-sm'
SCREAMING_SNAKE_CASE = 'us-east-1'
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 'accelerate-sagemaker-1'
SCREAMING_SNAKE_CASE = '1.6'
SCREAMING_SNAKE_CASE = '4.4'
SCREAMING_SNAKE_CASE = 'train.py'
SCREAMING_SNAKE_CASE = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
SCREAMING_SNAKE_CASE = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class A__ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self: Dict) -> List[Any]:
"""simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"] , str)
        assert isinstance(converted_args["do_train"] , bool)
        assert isinstance(converted_args["epochs"] , int)
        assert isinstance(converted_args["learning_rate"] , float)
        assert isinstance(converted_args["max_steps"] , float)
        with pytest.raises(ValueError):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 364 |
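# Aside: a hedged re-implementation of the behavior being tested above: pair up
# `--flag value` tokens (bare flags become True) and cast values to
# bool/int/float where possible. This is a sketch, not accelerate's actual
# `_convert_nargs_to_dict`.
def convert_nargs_to_dict(nargs):
    def cast(value):
        if value in ("True", "False"):
            return value == "True"
        for typ in (int, float):
            try:
                return typ(value)
            except ValueError:
                continue
        return value

    out, key = {}, None
    for token in nargs:
        if token.startswith("--"):
            if key is not None:
                out[key] = True  # previous flag had no explicit value
            key = token[2:]
        else:
            out[key] = cast(token)
            key = None
    if key is not None:
        out[key] = True
    return out

print(convert_nargs_to_dict(["--do_train", "--epochs", "3", "--max_steps", "50.5"]))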
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case : List[str] = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: List[str] , **_SCREAMING_SNAKE_CASE: Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
super().__init__(**_SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
requires_backends(self , "vision")
self.check_model_type(_SCREAMING_SNAKE_CASE)
def __call__( self: str , _SCREAMING_SNAKE_CASE: Union[str, "Image.Image", List[Dict[str, Any]]] , _SCREAMING_SNAKE_CASE: Union[str, List[str]] = None , **_SCREAMING_SNAKE_CASE: Optional[Any] , ) -> int:
"""simple docstring"""
if "text_queries" in kwargs:
__lowerCAmelCase : List[str] = kwargs.pop("text_queries")
if isinstance(_SCREAMING_SNAKE_CASE , (str, Image.Image)):
__lowerCAmelCase : Any = {"image": image, "candidate_labels": candidate_labels}
else:
__lowerCAmelCase : Dict = image
__lowerCAmelCase : Optional[int] = super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
return results
def _SCREAMING_SNAKE_CASE ( self: Any , **_SCREAMING_SNAKE_CASE: Tuple) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = {}
if "threshold" in kwargs:
__lowerCAmelCase : Optional[int] = kwargs["threshold"]
if "top_k" in kwargs:
__lowerCAmelCase : int = kwargs["top_k"]
return {}, {}, postprocess_params
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Dict) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = load_image(inputs["image"])
__lowerCAmelCase : Union[str, Any] = inputs["candidate_labels"]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[int] = candidate_labels.split(",")
__lowerCAmelCase : Union[str, Any] = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
for i, candidate_label in enumerate(_SCREAMING_SNAKE_CASE):
__lowerCAmelCase : Optional[Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
__lowerCAmelCase : Dict = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=self.framework)
yield {
"is_last": i == len(_SCREAMING_SNAKE_CASE) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[str]) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = model_inputs.pop("target_size")
__lowerCAmelCase : Any = model_inputs.pop("candidate_label")
__lowerCAmelCase : List[str] = model_inputs.pop("is_last")
__lowerCAmelCase : Dict = self.model(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self: List[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: str=0.1 , _SCREAMING_SNAKE_CASE: Optional[int]=None) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = []
for model_output in model_outputs:
__lowerCAmelCase : Dict = model_output["candidate_label"]
__lowerCAmelCase : int = BaseModelOutput(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = self.image_processor.post_process_object_detection(
outputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE , target_sizes=model_output["target_size"])[0]
for index in outputs["scores"].nonzero():
__lowerCAmelCase : Any = outputs["scores"][index].item()
__lowerCAmelCase : int = self._get_bounding_box(outputs["boxes"][index][0])
__lowerCAmelCase : List[str] = {"score": score, "label": label, "box": box}
results.append(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE: x["score"] , reverse=_SCREAMING_SNAKE_CASE)
if top_k:
__lowerCAmelCase : str = results[:top_k]
return results
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: "torch.Tensor") -> Dict[str, int]:
"""simple docstring"""
if self.framework != "pt":
raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = box.int().tolist()
__lowerCAmelCase : Any = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox | 58 | 0 |
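# Aside: a hedged usage sketch for the pipeline above. OWL-ViT is the model
# family commonly used for zero-shot detection; the output dicts match the
# score/label/box structure built in the postprocess step.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for p in predictions:
    print(p["label"], round(p["score"], 3), p["box"])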
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase_ : List[str] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 286 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowercase_ : List[str] = CustomTokenizer
pass | 286 | 1 |
def lowerCAmelCase( __lowerCamelCase = 100_0000 ):
    limit = __lowerCamelCase + 1
    frequency = [0] * limit
    for first_term in range(1 , limit ):
        for n in range(first_term , limit , first_term ):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x,y,z are positive integers
                    frequency[n] += 1  # so z>0 and a>d, also 4d<a
    count = sum(1 for x in frequency[1:limit] if x == 10 )
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 197 | from __future__ import annotations
import numpy as np
def lowerCAmelCase( __lowerCamelCase ):
return np.maximum(0 , __lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 197 | 1 |
def lowerCamelCase_ ( UpperCamelCase__ : list[list[int | float]] ) -> int:
"""simple docstring"""
__lowerCamelCase = len(UpperCamelCase__ )
__lowerCamelCase = len(matrix[0] )
__lowerCamelCase = min(UpperCamelCase__ , UpperCamelCase__ )
for row in range(UpperCamelCase__ ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , UpperCamelCase__ ):
__lowerCamelCase = matrix[col][row] / matrix[row][row]
for i in range(UpperCamelCase__ , UpperCamelCase__ ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
__lowerCamelCase = True
for i in range(row + 1 , UpperCamelCase__ ):
if matrix[i][row] != 0:
__lowerCamelCase , __lowerCamelCase = matrix[i], matrix[row]
__lowerCamelCase = False
break
if reduce:
rank -= 1
for i in range(UpperCamelCase__ ):
__lowerCamelCase = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
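import numpy as np

# Aside: a quick cross-check of the elimination-based rank above with NumPy's
# SVD-based rank; the second row is twice the first, so the rank drops to 2.
matrix = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0], [1.0, 0.0, 1.0]])
print(np.linalg.matrix_rank(matrix))  # 2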
| 90 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Optional[Any]=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_a : str = nn.Parameter(__a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_a : Any = nn.Parameter(__a )
def UpperCAmelCase_ (__a : int , __a : Optional[Any] , __a : int ):
"""simple docstring"""
_a : Tuple = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Optional[Any] , __a : Optional[int] , __a : List[str] ):
"""simple docstring"""
_a : Dict = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : str = np.asarray(weights[2] )
_a : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Any , __a : Any , __a : Optional[Any] ):
"""simple docstring"""
_a : List[str] = weights[0][0][0]
_a : List[Any] = np.asarray(layer_norm_a[0] )
_a : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
# intermediate weighs
_a : Optional[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Union[str, Any] = intermediate_weights[2]
# layernorm 2
_a : Any = np.asarray(intermediate_weights[0][0] )
_a : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Any = np.asarray(intermediate_weights[1][0] )
_a : Any = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Optional[int] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Dict , __a : Dict , __a : List[Any] ):
"""simple docstring"""
_a : Optional[int] = torch_model.reformer
# word embeds
_a : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : Any = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : List[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : Any = nn.Parameter(torch.tensor(__a ) )
_a : List[str] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Tuple = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : Optional[Any] = np.asarray(weights[7][0] )
_a : int = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : List[str] = np.asarray(weights[9][0] )
_a : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Tuple , __a : Optional[Any] , __a : Dict ):
"""simple docstring"""
_a : List[Any] = ReformerConfig.from_json_file(__a )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(__a )
with open(__a , 'rb' ) as f:
_a : Optional[Any] = pickle.load(__a )['weights']
set_model_weights_in_torch(__a , __a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
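import torch
from torch import nn

# Aside: the shape-checked copy pattern that the conversion script above relies
# on, reduced to a standalone helper: copy a trax/NumPy weight into an existing
# torch layer only when the shapes line up exactly.
def copy_weight(layer, weight, bias=None):
    weight = torch.tensor(weight)
    assert layer.weight.shape == weight.shape, "layer.weight does not match"
    layer.weight = nn.Parameter(weight)
    if bias is not None:
        bias = torch.tensor(bias)
        assert layer.bias.shape == bias.shape, "layer.bias does not match"
        layer.bias = nn.Parameter(bias)

copy_weight(nn.Linear(4, 2), torch.randn(2, 4).numpy())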
| 271 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a=0.2 , _a=0.2 ):
__a = bp_numa
__a = bp_numa
__a = bp_numa
__a = conva_get[:2]
__a = conva_get[2]
__a = size_pa
__a = rate_w
__a = rate_t
__a = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__a = -2 * np.random.rand(self.conva[1] ) + 1
__a = -2 * np.random.rand(self.num_bpa ) + 1
__a = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self , _a ):
# save model dict with pickle
__a = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_a , '''wb''' ) as f:
pickle.dump(_a , _a )
print(f'''Model saved: {save_path}''' )
@classmethod
def __UpperCAmelCase ( cls , _a ):
# read saved model
with open(_a , '''rb''' ) as f:
__a = pickle.load(_a ) # noqa: S301
__a = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
__a = model_dic.get('''size_pooling1''' )
__a = model_dic.get('''num_bp1''' )
__a = model_dic.get('''num_bp2''' )
__a = model_dic.get('''num_bp3''' )
__a = model_dic.get('''rate_weight''' )
__a = model_dic.get('''rate_thre''' )
# create model instance
__a = CNN(_a , _a , _a , _a , _a , _a , _a )
# modify model parameter
__a = model_dic.get('''w_conv1''' )
__a = model_dic.get('''wkj''' )
__a = model_dic.get('''vji''' )
__a = model_dic.get('''thre_conv1''' )
__a = model_dic.get('''thre_bp2''' )
__a = model_dic.get('''thre_bp3''' )
return conv_ins
def __UpperCAmelCase ( self , _a ):
return 1 / (1 + np.exp(-1 * x ))
def __UpperCAmelCase ( self , _a ):
return round(_a , 3 )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
# convolution process
__a = convs[0]
__a = convs[1]
__a = np.shape(_a )[0]
# get the data slice of original image data, data_focus
__a = []
for i_focus in range(0 , size_data - size_conv + 1 , _a ):
for j_focus in range(0 , size_data - size_conv + 1 , _a ):
__a = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_a )
# calculate the feature map of every single kernel, and save it as a list of matrices
__a = []
__a = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_a ):
__a = []
for i_focus in range(len(_a ) ):
__a = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_a ) )
__a = np.asmatrix(_a ).reshape(
_a , _a )
data_featuremap.append(_a )
# expanding the data slice to one dimension
__a = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_a ) )
__a = np.asarray(_a )
return focus_list, data_featuremap
def __UpperCAmelCase ( self , _a , _a , _a="average_pool" ):
# pooling process
__a = len(featuremaps[0] )
__a = int(size_map / size_pooling )
__a = []
for i_map in range(len(_a ) ):
__a = featuremaps[i_map]
__a = []
for i_focus in range(0 , _a , _a ):
for j_focus in range(0 , _a , _a ):
__a = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_a ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_a ) )
__a = np.asmatrix(_a ).reshape(_a , _a )
featuremap_pooled.append(_a )
return featuremap_pooled
def __UpperCAmelCase ( self , _a ):
# expanding three dimension data to one dimension list
__a = []
for i in range(len(_a ) ):
__a = np.shape(data[i] )
__a = data[i].reshape(1 , shapes[0] * shapes[1] )
__a = data_listed.getA().tolist()[0]
data_expanded.extend(_a )
__a = np.asarray(_a )
return data_expanded
def __UpperCAmelCase ( self , _a ):
# expanding matrix to one dimension list
__a = np.asarray(_a )
__a = np.shape(_a )
__a = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a ):
__a = []
__a = 0
for i_map in range(_a ):
__a = np.ones((size_map, size_map) )
for i in range(0 , _a , _a ):
for j in range(0 , _a , _a ):
__a = pd_pool[
i_pool
]
__a = i_pool + 1
__a = np.multiply(
_a , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_a )
return pd_all
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a=bool ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_a )) )
print((''' - - Shape: Teach_Data ''', np.shape(_a )) )
__a = 0
__a = []
__a = 10_000
while rp < n_repeat and mse >= error_accuracy:
__a = 0
print(f'''-------------Learning Time {rp}--------------''' )
for p in range(len(_a ) ):
# print('------------Learning Image: %d--------------'%p)
__a = np.asmatrix(datas_train[p] )
__a = np.asarray(datas_teach[p] )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
__a = np.shape(_a )
__a = self._expand(_a )
__a = data_bp_input
__a = np.dot(_a , self.vji.T ) - self.thre_bpa
__a = self.sig(_a )
__a = np.dot(_a , self.wkj.T ) - self.thre_bpa
__a = self.sig(_a )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__a = np.multiply(
(data_teach - bp_outa) , np.multiply(_a , (1 - bp_outa) ) )
__a = np.multiply(
np.dot(_a , self.wkj ) , np.multiply(_a , (1 - bp_outa) ) )
__a = np.dot(_a , self.vji )
__a = pd_i_all / (self.size_poolinga * self.size_poolinga)
__a = pd_conva_pooled.T.getA().tolist()
__a = self._calculate_gradient_from_pool(
_a , _a , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__a = self._expand_mat(pd_conva_all[k_conv] )
__a = self.rate_weight * np.dot(_a , _a )
__a = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__a = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__a = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__a = self.vji + pd_j_all.T * bp_outa * self.rate_weight
__a = self.thre_bpa - pd_k_all * self.rate_thre
__a = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__a = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__a = rp + 1
__a = error_count / patterns
all_mse.append(_a )
def draw_error():
__a = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_a , '''+-''' )
plt.plot(_a , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_a , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def __UpperCAmelCase ( self , _a ):
# model predict
__a = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_a )) )
for p in range(len(_a ) ):
__a = np.asmatrix(datas_test[p] )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
__a = self._expand(_a )
__a = data_bp_input
__a = bp_outa * self.vji.T - self.thre_bpa
__a = self.sig(_a )
__a = bp_outa * self.wkj.T - self.thre_bpa
__a = self.sig(_a )
produce_out.extend(bp_outa.getA().tolist() )
__a = [list(map(self.do_round , _a ) ) for each in produce_out]
return np.asarray(_a )
def __UpperCAmelCase ( self , _a ):
# return the image data after the convolution process so it can be inspected
__a = np.asmatrix(_a )
__a , __a = self.convolute(
_a , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__a = self.pooling(_a , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
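# Illustrative training sketch for the class above, using hypothetical toy
# data and the `train`/`predict` method names of the reference implementation
# (the method names in this listing differ, so treat this as a sketch only):
#
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=9, bp_num2=20, bp_num3=1)
#   mse = cnn.train(patterns, datas_train, datas_teach, n_repeat=50,
#                   error_accuracy=0.1, draw_e=False)
#   preds = cnn.predict(datas_test)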
| 368 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess ( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
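# Minimal usage sketch for the pipeline above (the checkpoint name is
# illustrative; any LDM super-resolution checkpoint with a VQ-VAE, a UNet and
# one of the supported schedulers should work):
#
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB")
#   upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]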
| 11 | 0 |
from collections.abc import Callable
import numpy as np
def lowercase__ ( ode_func : Callable , ya : float , xa : float , x_end : float , step_size : float ):
    '''simple docstring'''
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
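# Worked example: integrate dy/dx = y with y(0) = 1 over [0, 1]. Explicit
# Euler with step 0.01 gives y(1) = (1.01)**100 ~ 2.7048, slightly below the
# exact value e ~ 2.71828, as expected for a method with O(step_size) error:
#
#   y = lowercase__(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
#   print(y[-1])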
| 29 |
def lowercase__ ( a : int , b : int ):
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == '1' and char_b == '1' ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
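# Example: for a = 25 (0b11001) and b = 9 (0b1001) the shorter string is
# zero-filled to 01001, and the character-wise AND yields "0b01001"; for
# a = 25 and b = 32 (0b100000) the result is "0b000000".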
| 29 | 1 |
def _modexpt (base : int , exponent : int , modulo_value : int ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution (base : int = 1777 , height : int = 1855 , digits : int = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 10**digits )
    return result
if __name__ == "__main__":
print(F'{solution() = }')
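# Each step of the recursion above is equivalent to Python's three-argument
# pow, e.g. _modexpt(3, 10, 1000) == pow(3, 10, 1000) == 49. `solution`
# iterates this to evaluate the tetration 1777^^1855 modulo 10**digits, i.e.
# the last `digits` digits of the hyperexponentiation (Project Euler 188).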
| 358 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva
import numpy as np
# Parameters
__UpperCamelCase : Union[str, Any] = (720, 1_280) # Height, Width
__UpperCamelCase : Any = (0.4, 0.6) # if height or width lower than this scale, drop it.
__UpperCamelCase : str = 1 / 100
__UpperCamelCase : Optional[int] = ""
__UpperCamelCase : List[Any] = ""
__UpperCamelCase : Union[str, Any] = ""
__UpperCamelCase : Tuple = 250
def main ( ) -> None:
a , a = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
a = random.sample(range(len(__lowerCamelCase ) ) , 4 )
a , a , a = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
a = random_chars(32 )
a = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
a = []
for anno in new_annos:
a = anno[3] - anno[1]
a = anno[4] - anno[2]
a = anno[1] + width / 2
a = anno[2] + height / 2
a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(__lowerCamelCase )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset ( label_dir : str , img_dir : str ) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'{label_name}.jpg' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("""\n""" ).split(""" """ )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 0.0 , ) -> tuple[list, list, str]:
a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
a = int(scale_x * output_size[1] )
a = int(scale_y * output_size[0] )
a = []
a = []
for i, index in enumerate(__lowerCamelCase ):
a = all_img_list[index]
path_list.append(__lowerCamelCase )
a = all_annos[index]
a = cva.imread(__lowerCamelCase )
if i == 0: # top-left
a = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = bbox[2] * scale_y
a = bbox[3] * scale_x
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
a = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = bbox[2] * scale_y
a = scale_x + bbox[3] * (1 - scale_x)
a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
a = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = bbox[1] * scale_x
a = scale_y + bbox[2] * (1 - scale_y)
a = bbox[3] * scale_x
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
a = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
a = img
for bbox in img_annos:
a = scale_x + bbox[1] * (1 - scale_x)
a = scale_y + bbox[2] * (1 - scale_y)
a = scale_x + bbox[3] * (1 - scale_x)
a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars ( number_char : int ) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    chars = ascii_lowercase + digits
    return "".join(random.choice(chars ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 347 | 0 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__( self ):
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority : int , data : int ):
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('Valid priorities are 0, 1, and 2' )
    def dequeue( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError('All queues are empty' )
    def __str__( self ):
        return "\n".join(F"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
    def __init__( self ):
        self.queue = []
    def enqueue( self , data : int ):
        if len(self.queue ) == 100:
            raise OverFlowError('Maximum queue size is 100' )
        self.queue.append(data )
    def dequeue( self ):
        if not self.queue:
            raise UnderFlowError('The queue is empty' )
        else:
            data = min(self.queue )
            self.queue.remove(data )
            return data
    def __str__( self ):
        return str(self.queue )
def fixed_priority_queue ( ):
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue ( ):
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
    element_priority_queue()
| 297 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class GitVisionConfig ( PretrainedConfig ):
'''simple docstring'''
__snake_case : Dict = "git_vision_model"
def __init__( self : List[Any] ,lowerCamelCase__ : Dict=768 ,lowerCamelCase__ : Union[str, Any]=3072 ,lowerCamelCase__ : Optional[int]=12 ,lowerCamelCase__ : Tuple=12 ,lowerCamelCase__ : Tuple=3 ,lowerCamelCase__ : Optional[Any]=224 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]="quick_gelu" ,lowerCamelCase__ : Optional[Any]=1e-5 ,lowerCamelCase__ : str=0.0 ,lowerCamelCase__ : Optional[int]=0.02 ,**lowerCamelCase__ : Union[str, Any] ,) -> Optional[int]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Tuple ,lowerCamelCase__ : Union[str, os.PathLike] ,**lowerCamelCase__ : int ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(lowerCamelCase__ )
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = cls.get_config_dict(lowerCamelCase__ ,**lowerCamelCase__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
SCREAMING_SNAKE_CASE = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCamelCase__ ,**lowerCamelCase__ )
class GitConfig ( PretrainedConfig ):
'''simple docstring'''
__snake_case : Dict = "git"
def __init__( self : Optional[int] ,lowerCamelCase__ : int=None ,lowerCamelCase__ : str=30522 ,lowerCamelCase__ : Tuple=768 ,lowerCamelCase__ : Union[str, Any]=6 ,lowerCamelCase__ : str=12 ,lowerCamelCase__ : List[str]=3072 ,lowerCamelCase__ : Dict="gelu" ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : Any=0.1 ,lowerCamelCase__ : List[str]=1024 ,lowerCamelCase__ : List[str]=0.02 ,lowerCamelCase__ : str=1e-1_2 ,lowerCamelCase__ : Optional[int]=0 ,lowerCamelCase__ : Optional[int]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : str=False ,lowerCamelCase__ : int=101 ,lowerCamelCase__ : int=102 ,lowerCamelCase__ : Dict=None ,**lowerCamelCase__ : List[Any] ,) -> Optional[Any]:
'''simple docstring'''
super().__init__(bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,pad_token_id=lowerCamelCase__ ,**lowerCamelCase__ )
if vision_config is None:
SCREAMING_SNAKE_CASE = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
SCREAMING_SNAKE_CASE = GitVisionConfig(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = tie_word_embeddings
SCREAMING_SNAKE_CASE = num_image_with_embedding
SCREAMING_SNAKE_CASE = bos_token_id
SCREAMING_SNAKE_CASE = eos_token_id
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
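# Sketch of building the configs above with default values (the sizes shown
# are the defaults and purely illustrative):
#
#   vision_config = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=6)
#   print(config.vision_config.hidden_size)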
| 296 | 0 |
from __future__ import annotations
lowerCAmelCase__ : str =1.6021E-19 # units = C
def __lowercase ( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif conductivity < 0:
raise ValueError('Conductivity cannot be negative' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative' )
elif mobility < 0:
raise ValueError('mobility cannot be negative' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
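# Worked example: pass conductivity=0 to solve for it from the other two
# values. With mobility mu = 0.14 m^2/(V*s) and electron concentration
# n = 1e18 m^-3, sigma = n * e * mu ~ 1e18 * 1.6021e-19 * 0.14 ~ 0.0224 S/m:
#
#   print(__lowercase(conductivity=0, electron_conc=1e18, mobility=0.14))
#   # ('conductivity', 0.022429...)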
| 118 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments :
'''simple docstring'''
    output_dir : str = field(
metadata={'''help''': '''The output directory where the model will be written.'''} , )
    encoder_model_name_or_path : str = field(
metadata={
'''help''': (
'''The encoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train an encoder model from scratch.'''
)
} , )
    decoder_model_name_or_path : str = field(
metadata={
'''help''': (
'''The decoder model checkpoint for weights initialization.'''
'''Don\'t set if you want to train a decoder model from scratch.'''
)
} , )
    encoder_config_name : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
    decoder_config_name : Optional[str] = field(
default=UpperCamelCase_ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )
def main ( ) -> None:
    parser = HfArgumentParser((ModelArguments,) )
    (model_args , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=a__ , decoder_config=a__ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
__SCREAMING_SNAKE_CASE = decoder_config.decoder_start_token_id
__SCREAMING_SNAKE_CASE = decoder_config.pad_token_id
if decoder_start_token_id is None:
__SCREAMING_SNAKE_CASE = decoder_config.bos_token_id
if pad_token_id is None:
__SCREAMING_SNAKE_CASE = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
__SCREAMING_SNAKE_CASE = decoder_config.eos_token_id
__SCREAMING_SNAKE_CASE = decoder_start_token_id
__SCREAMING_SNAKE_CASE = pad_token_id
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
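# Example invocation (the checkpoint names are illustrative; any compatible
# Flax vision encoder plus causal-LM decoder pair should work):
#
#   python create_model_from_encoder_decoder.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2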
| 118 | 1 |
from __future__ import annotations
from random import random
class Node :
    """simple docstring"""
    def __init__( self , value = None ) -> None:
        '''simple docstring'''
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
def __repr__( self ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return F'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__( self ) -> str:
        '''simple docstring'''
        value = str(self.value ) + """ """
        left = str(self.left or """""" )
        right = str(self.right or """""" )
        return value + left + right
def split ( root : Node | None , value : int ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge ( left : Node | None , right : Node | None ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert ( root : Node | None , value : int ):
    '''simple docstring'''
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase ( root : Node | None , value : int ):
    '''simple docstring'''
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder ( root : Node | None ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def interact_treap ( root : Node | None , args : str ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("""Unknown command""" )
    return root
def main ( ):
    '''simple docstring'''
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """ )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("""goodbye!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
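# Example session (illustrative). Entering "+1 +3 +5 +17 +19 +2 +16 +4 +0"
# inserts those keys with random priorities; inorder(root) then prints them
# in sorted order (0,1,2,3,4,5,16,17,19,), and "-4" erases the key 4. Because
# priorities come from random(), the tree shape differs between runs, but the
# inorder traversal is always sorted.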
| 252 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
"""simple docstring"""
UpperCamelCase : Any = ["image_processor", "tokenizer"]
UpperCamelCase : Dict = "BridgeTowerImageProcessor"
UpperCamelCase : List[Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ) -> None:
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
def __call__( self , A , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ) -> BatchEncoding:
'''simple docstring'''
lowerCamelCase = self.tokenizer(
text=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel_values + pixel_mask
lowerCamelCase = self.image_processor(
A , return_tensors=A , do_normalize=A , do_center_crop=A , **A )
encoding.update(A )
return encoding
def __A ( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.batch_decode(*A , **A )
def __A ( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*A , **A )
@property
def __A ( self ) -> Dict:
'''simple docstring'''
lowerCamelCase = self.tokenizer.model_input_names
lowerCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
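# Minimal usage sketch (the checkpoint name is illustrative):
#
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   image = Image.open("cat.png").convert("RGB")
#   encoding = processor(image, "a photo of a cat", return_tensors="pt")
#   # encoding holds input_ids/attention_mask from the tokenizer plus
#   # pixel_values/pixel_mask from the image processor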
| 252 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig( PretrainedConfig ):
'''simple docstring'''
lowercase__ = "visual_bert"
def __init__( self: Any, a_: Tuple=30_522, a_: Dict=768, a_: Optional[Any]=512, a_: Optional[Any]=12, a_: str=12, a_: Union[str, Any]=3_072, a_: int="gelu", a_: Dict=0.1, a_: Optional[int]=0.1, a_: str=512, a_: int=2, a_: List[Any]=0.02, a_: Tuple=1E-12, a_: Any=False, a_: Dict=True, a_: List[Any]=1, a_: str=0, a_: Dict=2, **a_: Dict, ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase, bos_token_id=__UpperCAmelCase, eos_token_id=__UpperCAmelCase, **__UpperCAmelCase )
_snake_case : List[Any] = vocab_size
_snake_case : Tuple = max_position_embeddings
_snake_case : int = hidden_size
_snake_case : Tuple = visual_embedding_dim
_snake_case : Tuple = num_hidden_layers
_snake_case : List[str] = num_attention_heads
_snake_case : Tuple = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : int = hidden_dropout_prob
_snake_case : List[str] = attention_probs_dropout_prob
_snake_case : Dict = initializer_range
_snake_case : List[Any] = type_vocab_size
_snake_case : List[str] = layer_norm_eps
_snake_case : Tuple = bypass_transformer
_snake_case : List[Any] = special_visual_initialize
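# Sketch of pairing the config above with a model (the model class name is
# assumed from the transformers VisualBERT family):
#
#   config = VisualBertConfig(visual_embedding_dim=512)
#   # model = VisualBertModel(config)  # expects 512-d visual feature embeddings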
| 354 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ['''MaskFormerFeatureExtractor''']
A_ = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
A_ = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 132 | 0 |
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime ( number : int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator ( ) -> Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution ( n : int = 200_0000 ) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 22 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def __magic_name__ ( number : int ) -> int:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError("""Input must be a positive integer""" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
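# Quick check of the Liouville lambda function above, assuming prime_factors
# counts factors with multiplicity: 10 = 2*5 has an even count, 12 = 2*2*3 an
# odd count:
#
#   assert __magic_name__(10) == 1   # even number of prime factors
#   assert __magic_name__(12) == -1  # odd number of prime factors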
| 173 | 0 |
"""simple docstring"""
from __future__ import annotations
def prime_sieve ( limit : int ) -> list[int]:
    """simple docstring"""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution ( ceiling : int = 1_00_00_00 ) -> int:
    """simple docstring"""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
print(F'{solution() = }')
| 356 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState :
"""simple docstring"""
    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None
@classmethod
    def create( cls , common , init_noise_sigma , timesteps ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput ( FlaxSchedulerOutput ):
"""simple docstring"""
    state: DDPMSchedulerState
class FlaxDDPMScheduler ( FlaxSchedulerMixin , ConfigMixin ):
"""simple docstring"""
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
    dtype: jnp.dtype
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
return True
@register_to_config
def __init__( self , __A = 1000 , __A = 0.0_001 , __A = 0.02 , __A = "linear" , __A = None , __A = "fixed_small" , __A = True , __A = "epsilon" , __A = jnp.floataa , ) -> List[Any]:
a =dtype
def SCREAMING_SNAKE_CASE ( self , __A = None ) -> DDPMSchedulerState:
if common is None:
a =CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
a =jnp.array(1.0 , dtype=self.dtype )
a =jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__A , init_noise_sigma=__A , timesteps=__A , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None ) -> jnp.ndarray:
return sample
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = () ) -> DDPMSchedulerState:
a =self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
a =(jnp.arange(0 , __A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A , timesteps=__A , )
    def _get_variance ( self , state , t , predicted_variance=None , variance_type=None ):
a =state.common.alphas_cumprod[t]
a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
a =(1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
a =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
a =jnp.clip(__A , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
a =jnp.log(jnp.clip(__A , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
a =state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
a =jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
a =variance
a =state.common.betas[t]
a =(predicted_variance + 1) / 2
a =frac * max_log + (1 - frac) * min_log
return variance
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A = None , __A = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
a =timestep
if key is None:
a =jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
a , a =jnp.split(__A , sample.shape[1] , axis=1 )
else:
a =None
# 1. compute alphas, betas
a =state.common.alphas_cumprod[t]
a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
a =1 - alpha_prod_t
a =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
a =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
a =model_output
elif self.config.prediction_type == "v_prediction":
a =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, '''
                '''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
a =jnp.clip(__A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a =(alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
a =state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
a =jax.random.split(__A , num=1 )
a =jax.random.normal(__A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__A , __A , predicted_variance=__A ) ** 0.5) * noise
a =jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
a =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A , state=__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , ) -> jnp.ndarray:
return add_noise_common(state.common , __A , __A , __A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , ) -> jnp.ndarray:
return get_velocity_common(state.common , __A , __A , __A )
def __len__( self ) -> Optional[int]:
        return self.config.num_train_timesteps
| 215 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
"""num_inference_steps""",
"""num_waveforms_per_prompt""",
"""generator""",
"""latents""",
"""output_type""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowerCamelCase_ , )
UpperCamelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
torch.manual_seed(0 )
UpperCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
UpperCamelCase = ClapTextModelWithProjection(lowerCamelCase_ )
UpperCamelCase = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
UpperCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCamelCase_ , )
UpperCamelCase = SpeechTaHifiGan(lowerCamelCase_ )
UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any]=0 ):
"""simple docstring"""
if str(lowerCamelCase_ ).startswith("""mps""" ):
UpperCamelCase = torch.manual_seed(lowerCamelCase_ )
else:
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase = audioldm_pipe.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""pt""" , )
UpperCamelCase = text_inputs["""input_ids"""].to(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.text_encoder(
lowerCamelCase_ , )
UpperCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(lowerCamelCase_ , dim=-1 )
UpperCamelCase = prompt_embeds
# forward
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * ["""this is a negative prompt"""]
UpperCamelCase = negative_prompt
UpperCamelCase = 3 * [inputs["""prompt"""]]
# forward
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios[0]
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = 3 * [inputs.pop("""prompt""" )]
UpperCamelCase = []
for p in [prompt, negative_prompt]:
UpperCamelCase = audioldm_pipe.tokenizer(
lowerCamelCase_ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCamelCase_ , return_tensors="""pt""" , )
UpperCamelCase = text_inputs["""input_ids"""].to(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.text_encoder(
lowerCamelCase_ , )
UpperCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
UpperCamelCase = F.normalize(lowerCamelCase_ , dim=-1 )
embeds.append(lowerCamelCase_ )
UpperCamelCase , UpperCamelCase = embeds
# forward
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = """egg cracking"""
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ , negative_prompt=lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 256
UpperCamelCase = audio[:10]
UpperCamelCase = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
UpperCamelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
UpperCamelCase = 2
UpperCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCamelCase_ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.vocoder.config.sampling_rate
UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.0_1_6 , **lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.0_1_6
UpperCamelCase = audioldm_pipe(audio_length_in_s=0.0_3_2 , **lowerCamelCase_ )
UpperCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) / vocoder_sampling_rate == 0.0_3_2
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = AudioLDMPipeline(**lowerCamelCase_ )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = ["""hey"""]
UpperCamelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
assert audio_shape == (1, 256)
UpperCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
UpperCamelCase = SpeechTaHifiGan(lowerCamelCase_ ).to(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe(lowerCamelCase_ , num_inference_steps=1 )
UpperCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCamelCase_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase_ )
@slow
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]="cpu" , lowerCamelCase_ : List[str]=torch.floataa , lowerCamelCase_ : Tuple=0 ):
"""simple docstring"""
UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCamelCase = np.random.RandomState(lowerCamelCase_ ).standard_normal((1, 8, 128, 16) )
UpperCamelCase = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ , dtype=lowerCamelCase_ )
UpperCamelCase = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_inputs(lowerCamelCase_ )
UpperCamelCase = 25
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 8_1920
UpperCamelCase = audio[7_7230:7_7240]
UpperCamelCase = np.array(
[-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
UpperCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
UpperCamelCase = audioldm_pipe.to(lowerCamelCase_ )
audioldm_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = self.get_inputs(lowerCamelCase_ )
UpperCamelCase = audioldm_pipe(**lowerCamelCase_ ).audios[0]
assert audio.ndim == 1
assert len(lowerCamelCase_ ) == 8_1920
UpperCamelCase = audio[2_7780:2_7790]
UpperCamelCase = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] )
UpperCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 343 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
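# Quick illustration (editorial note, not part of the original script): ord("中")
# is 0x4E2D, inside the main CJK block 0x4E00-0x9FFF, so the check above returns
# True for it, while ord("a") == 0x61 falls outside every range and returns False.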
def lowercase( UpperCamelCase_ ) -> Dict:
'''simple docstring'''
# word like '180' or '身高' or '神'
for char in word:
UpperCamelCase = ord(UpperCamelCase_ )
if not _is_chinese_char(UpperCamelCase_ ):
return 0
return 1
def lowercase( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = set()
for token in tokens:
UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ )
if chinese_word:
word_set.add(UpperCamelCase_ )
UpperCamelCase = list(UpperCamelCase_ )
return word_list
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] )
UpperCamelCase = bert_tokens
UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ )
while start < end:
UpperCamelCase = True
if is_chinese(bert_word[start] ):
UpperCamelCase = min(end - start , UpperCamelCase_ )
for i in range(UpperCamelCase_ , 1 , -1 ):
UpperCamelCase = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
UpperCamelCase = """##""" + bert_word[j]
UpperCamelCase = start + i
UpperCamelCase = False
break
if single_word:
start += 1
return bert_word
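# Reading of the loop above (editorial sketch): given BERT tokens
# ["天", "气", "好"] and an LTP-segmented word set containing "天气", the
# tokens become ["天", "##气", "好"]; the "##" prefix marks "气" as the
# continuation of a whole word, which is exactly what whole-word masking needs.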
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
UpperCamelCase = []
for i in range(0 , len(UpperCamelCase_ ) , 100 ):
UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0]
UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res]
ltp_res.extend(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
UpperCamelCase = []
for i in range(0 , len(UpperCamelCase_ ) , 100 ):
UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 )
bert_res.extend(res["""input_ids"""] )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
UpperCamelCase = []
for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase = []
for id in input_ids:
UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ )
input_tokens.append(UpperCamelCase_ )
UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(UpperCamelCase_ ):
if token[:2] == "##":
UpperCamelCase = token[2:]
# save the Chinese token's position
if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ):
ref_id.append(UpperCamelCase_ )
ref_ids.append(UpperCamelCase_ )
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
return ref_ids
def lowercase( UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
# For Chinese (Ro)BERT, the best result comes from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune this model, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase = f.readlines()
UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
UpperCamelCase = LTP(args.ltp ) # faster on a GPU device
UpperCamelCase = BertTokenizer.from_pretrained(args.bert )
UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids]
f.writelines(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
_SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
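# Example invocation (the script name and data paths are placeholders, not shipped with the script):
# python prepare_chinese_ref.py --file_name ./data/zh.txt --ltp ./resources/ltp --bert ./resources/robert --save_path ./data/ref.txt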
| 343 | 1 |
from __future__ import annotations
from typing import Any
class __a ( __UpperCamelCase ):
pass
class __a :
def __init__( self , lowerCAmelCase__ ) -> None:
'''simple docstring'''
lowercase__: Any = data
lowercase__: Node | None = None
def __iter__( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: Tuple = self
lowercase__: Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(lowerCAmelCase__ )
yield node.data
lowercase__: Optional[int] = node.next_node
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> bool:
'''simple docstring'''
try:
list(self )
return False
except ContainsLoopError:
return True
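# The property above trades O(n) memory for simplicity by recording visited
# nodes. A constant-space alternative is Floyd's tortoise-and-hare algorithm;
# a minimal sketch against the same Node interface (editorial addition):
def has_loop_floyd(head: Node | None) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advance one step
        fast = fast.next_node.next_node  # advance two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False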
if __name__ == "__main__":
__lowerCAmelCase = Node(1)
__lowerCAmelCase = Node(2)
__lowerCAmelCase = Node(3)
__lowerCAmelCase = Node(4)
print(root_node.has_loop) # False
__lowerCAmelCase = root_node.next_node
print(root_node.has_loop) # True
__lowerCAmelCase = Node(5)
__lowerCAmelCase = Node(6)
__lowerCAmelCase = Node(5)
__lowerCAmelCase = Node(6)
print(root_node.has_loop) # False
__lowerCAmelCase = Node(1)
print(root_node.has_loop) # False
| 353 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class __a ( __UpperCamelCase ):
__lowercase : Any = 'pegasus'
__lowercase : Union[str, Any] = ['past_key_values']
__lowercase : Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowerCAmelCase__=50_265 , lowerCAmelCase__=1_024 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=12 , lowerCAmelCase__=4_096 , lowerCAmelCase__=16 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="gelu" , lowerCAmelCase__=1_024 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=0 , lowerCAmelCase__=False , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , **lowerCAmelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
lowercase__: int = vocab_size
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[str] = d_model
lowercase__: Optional[Any] = encoder_ffn_dim
lowercase__: Optional[Any] = encoder_layers
lowercase__: Union[str, Any] = encoder_attention_heads
lowercase__: Optional[int] = decoder_ffn_dim
lowercase__: Tuple = decoder_layers
lowercase__: Union[str, Any] = decoder_attention_heads
lowercase__: Dict = dropout
lowercase__: List[str] = attention_dropout
lowercase__: List[str] = activation_dropout
lowercase__: Optional[int] = activation_function
lowercase__: Dict = init_std
lowercase__: Optional[Any] = encoder_layerdrop
lowercase__: List[str] = decoder_layerdrop
lowercase__: Union[str, Any] = use_cache
lowercase__: Any = encoder_layers
lowercase__: List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , forced_eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self.d_model
| 288 | 0 |
lowerCAmelCase : dict[str, float] = {
"joule": 1.0,
"kilojoule": 10_00,
"megajoule": 1_00_00_00,
"gigajoule": 10_00_00_00_00,
"wattsecond": 1.0,
"watthour": 36_00,
"kilowatthour": 3_60_00_00,
"newtonmeter": 1.0,
"calorie_nutr": 41_86.8,
"kilocalorie_nutr": 4_18_68_00.00,
"electronvolt": 1.6_02_17_66_34e-19,
"britishthermalunit_it": 10_55.0_55_85,
"footpound": 1.355_818,
}
def A_ ( a , a , a ):
"""simple docstring"""
if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
SCREAMING_SNAKE_CASE_ : Optional[int] = (
f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
f"Valid values are: {', '.join(a )}"
)
raise ValueError(a )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
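# Hand-checked examples against the table above: 1 joule -> 0.001 kilojoule
# (1 * 1.0 / 1000), and 1 kilowatthour -> 3_600_000.0 joule (1 * 3_600_000 / 1.0).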
if __name__ == "__main__":
import doctest
doctest.testmod()
| 253 |
import math
def A_ ( a , a = 0 , a = 0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = end or len(a )
for i in range(a , a ):
SCREAMING_SNAKE_CASE_ : List[Any] = i
SCREAMING_SNAKE_CASE_ : Optional[Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
SCREAMING_SNAKE_CASE_ : Tuple = array[temp_index - 1]
temp_index -= 1
SCREAMING_SNAKE_CASE_ : str = temp_index_value
return array
def A_ ( a , a , a ): # Max Heap
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = index
SCREAMING_SNAKE_CASE_ : str = 2 * index + 1 # Left Node
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
SCREAMING_SNAKE_CASE_ : Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
SCREAMING_SNAKE_CASE_ : Any = right_index
if largest != index:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[largest], array[index]
heapify(a , a , a )
def A_ ( a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = len(a )
for i in range(n // 2 , -1 , -1 ):
heapify(a , a , a )
for i in range(n - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[0], array[i]
heapify(a , 0 , a )
return array
def A_ ( a , a , a , a ):
"""simple docstring"""
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def A_ ( a , a , a , a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = low
SCREAMING_SNAKE_CASE_ : Tuple = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = array[j], array[i]
i += 1
def A_ ( a ):
"""simple docstring"""
if len(a ) == 0:
return array
SCREAMING_SNAKE_CASE_ : Any = 2 * math.ceil(math.loga(len(a ) ) )
SCREAMING_SNAKE_CASE_ : int = 1_6
return intro_sort(a , 0 , len(a ) , a , a )
def A_ ( a , a , a , a , a ):
"""simple docstring"""
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(a )
max_depth -= 1
SCREAMING_SNAKE_CASE_ : Optional[int] = median_of_a(a , a , start + ((end - start) // 2) + 1 , end - 1 )
SCREAMING_SNAKE_CASE_ : Dict = partition(a , a , a , a )
intro_sort(a , a , a , a , a )
SCREAMING_SNAKE_CASE_ : List[Any] = p
return insertion_sort(a , a , a )
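# Introsort strategy in brief: quicksort while the depth budget (2 * log2(n),
# computed in the entry function) lasts, heapsort the slice once the budget is
# exhausted to keep the O(n log n) worst case, and fall back to insertion sort
# for slices at or below the size threshold of 16, where it is fastest.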
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase : List[str] = input('Enter numbers separated by a comma : ').strip()
lowerCAmelCase : Optional[Any] = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 253 | 1 |
"""simple docstring"""
from __future__ import annotations
__UpperCamelCase : Dict = [True] * 1_0_0_0_0_0_1
__UpperCamelCase : Optional[int] = 2
while i * i <= 1_0_0_0_0_0_0:
if seive[i]:
for j in range(i * i, 1_0_0_0_0_0_1, i):
__UpperCamelCase : Tuple = False
i += 1
def __SCREAMING_SNAKE_CASE ( A_ ):
return seive[n]
def __SCREAMING_SNAKE_CASE ( A_ ):
return any(digit in '''02468''' for digit in str(A_ ) )
def __SCREAMING_SNAKE_CASE ( A_ = 1_00_00_00 ):
lowerCAmelCase__ : List[str] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(A_ ) and not contains_an_even_digit(A_ ):
lowerCAmelCase__ : List[Any] = str(A_ )
lowerCAmelCase__ : Optional[Any] = [int(str_num[j:] + str_num[:j] ) for j in range(len(A_ ) )]
if all(is_prime(A_ ) for i in list_nums ):
result.append(A_ )
return result
def __SCREAMING_SNAKE_CASE ( ):
return len(find_circular_primes() )
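# A circular prime stays prime under every rotation of its digits: 197 qualifies
# because 197, 971 and 719 are all prime. Any even digit would eventually rotate
# into the ones place and yield a composite rotation, which is why
# contains_an_even_digit() prunes candidates before the per-rotation primality checks.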
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 74 |
"""simple docstring"""
from __future__ import annotations
import math
__UpperCamelCase : Dict = '''2020.9.26'''
__UpperCamelCase : Tuple = '''xcodz-dot, cclaus, dhruvmanila'''
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ):
if not all(isinstance(A_ , (float, int) ) for val in locals().values() ):
lowerCAmelCase__ : Optional[Any] = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(A_ )
lowerCAmelCase__ : Optional[Any] = ((x * distance) / (z + distance)) * scale
lowerCAmelCase__ : Optional[int] = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ , A_ ):
if not isinstance(A_ , A_ ):
raise TypeError('''Axis must be a str''' )
lowerCAmelCase__ : str = locals()
del input_variables["axis"]
if not all(isinstance(A_ , (float, int) ) for val in input_variables.values() ):
lowerCAmelCase__ : int = (
'''Input values except axis must either be float or int: '''
f'{list(input_variables.values() )}'
)
raise TypeError(A_ )
lowerCAmelCase__ : Any = (angle % 3_60) / 4_50 * 1_80 / math.pi
if axis == "z":
lowerCAmelCase__ : Tuple = x * math.cos(A_ ) - y * math.sin(A_ )
lowerCAmelCase__ : List[str] = y * math.cos(A_ ) + x * math.sin(A_ )
lowerCAmelCase__ : Optional[Any] = z
elif axis == "x":
lowerCAmelCase__ : List[str] = y * math.cos(A_ ) - z * math.sin(A_ )
lowerCAmelCase__ : str = z * math.cos(A_ ) + y * math.sin(A_ )
lowerCAmelCase__ : Union[str, Any] = x
elif axis == "y":
lowerCAmelCase__ : Optional[int] = x * math.cos(A_ ) - z * math.sin(A_ )
lowerCAmelCase__ : Tuple = z * math.cos(A_ ) + x * math.sin(A_ )
lowerCAmelCase__ : Optional[int] = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
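# Geometry notes (editorial): the 2D conversion scales x and y by
# distance / (z + distance), a pinhole-style perspective projection in which
# farther points shrink toward the origin; each rotation branch applies the
# planar rotation matrix [[cos, -sin], [sin, cos]] in the plane perpendicular
# to the chosen axis.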
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }''')
print(F'''{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }''')
| 74 | 1 |
"""simple docstring"""
from math import factorial, radians
def A_ ( _lowercase, _lowercase = 18, _lowercase = 10 ):
'''simple docstring'''
snake_case_ :Tuple = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
snake_case_ :Tuple = radians(_lowercase )
snake_case_ :Dict = angle_in_radians
snake_case_ :Any = 3
snake_case_ :Dict = -1
for _ in range(_lowercase ):
result += (b * (angle_in_radians**a)) / factorial(_lowercase )
snake_case_ :Dict = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(_lowercase, _lowercase )
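# Series being evaluated: sin(x) = x - x**3/3! + x**5/5! - ... The accumulator
# is seeded with the first term, x itself, so the loop starts at exponent 3
# with sign -1 and alternates; more iterations (the accuracy argument) add
# higher-order terms.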
if __name__ == "__main__":
__import__("doctest").testmod()
| 66 |
'''simple docstring'''
def UpperCamelCase_ ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> float:
"""simple docstring"""
def get_matched_characters(_UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
_UpperCAmelCase : Tuple = []
_UpperCAmelCase : Dict = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_UpperCAmelCase : int = int(max(0 , i - limit ) )
_UpperCAmelCase : Any = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_UpperCAmelCase )
_UpperCAmelCase : List[Any] = F"""{_stra[0:_stra.index(_UpperCAmelCase )]} {_stra[_stra.index(_UpperCAmelCase ) + 1:]}"""
return "".join(_UpperCAmelCase )
# matching characters
_UpperCAmelCase : Union[str, Any] = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : Tuple = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase : Tuple = len(_UpperCAmelCase )
# transposition
_UpperCAmelCase : Optional[Any] = (
len([(ca, ca) for ca, ca in zip(_UpperCAmelCase , _UpperCAmelCase ) if ca != ca] ) // 2
)
if not match_count:
_UpperCAmelCase : Dict = 0.0
else:
_UpperCAmelCase : Optional[int] = (
1
/ 3
* (
match_count / len(_UpperCAmelCase )
+ match_count / len(_UpperCAmelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_UpperCAmelCase : str = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
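# Jaro-Winkler = jaro + l * p * (1 - jaro), where l is the common-prefix length
# (capped at 4 above) and p = 0.1 is the conventional scaling factor; the
# textbook pair ("martha", "marhta") scores roughly 0.9611 versus 0.9444 plain Jaro.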
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 31 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def lowerCamelCase__ ( A : np.ndarray ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def lowerCamelCase__ ( A : np.ndarray ):
'''simple docstring'''
return (gray > 1_27) & (gray <= 2_55)
def lowerCamelCase__ ( A : np.ndarray , A : np.ndarray ):
'''simple docstring'''
UpperCAmelCase = np.zeros_like(A )
UpperCAmelCase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
UpperCAmelCase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
UpperCAmelCase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
UpperCAmelCase = int(summation > 0 )
return output
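# Morphological dilation in words: an output pixel turns on whenever the
# structuring element, slid to that position, overlaps at least one foreground
# pixel (summation > 0); the effect is to grow white regions and close small gaps.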
if __name__ == "__main__":
# read original image
_lowercase : Dict = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
_lowercase : str = np.array(Image.open(lena_path))
# kernel to be applied
_lowercase : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
_lowercase : List[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
_lowercase : List[str] = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 355 |
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 91 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
set_seed(770)
lowercase_ = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
lowercase_ = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
lowercase_ = os.path.dirname(os.path.abspath(__file__))
lowercase_ = os.path.join(os.path.expanduser("""~"""), """.cache""")
lowercase_ = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def lowerCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple=False ) ->Any:
_SCREAMING_SNAKE_CASE = model_type
if use_small:
key += "_small"
return os.path.join(__lowerCamelCase , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def lowerCamelCase ( __lowerCamelCase : Any , __lowerCamelCase : int ) ->Any:
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
hf_hub_download(repo_id=__lowerCamelCase , filename=__lowerCamelCase , local_dir=__lowerCamelCase )
def lowerCamelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Dict=False , __lowerCamelCase : Optional[int]="text" ) ->Optional[int]:
if model_type == "text":
_SCREAMING_SNAKE_CASE = BarkSemanticModel
_SCREAMING_SNAKE_CASE = BarkSemanticConfig
_SCREAMING_SNAKE_CASE = BarkSemanticGenerationConfig
elif model_type == "coarse":
_SCREAMING_SNAKE_CASE = BarkCoarseModel
_SCREAMING_SNAKE_CASE = BarkCoarseConfig
_SCREAMING_SNAKE_CASE = BarkCoarseGenerationConfig
elif model_type == "fine":
_SCREAMING_SNAKE_CASE = BarkFineModel
_SCREAMING_SNAKE_CASE = BarkFineConfig
_SCREAMING_SNAKE_CASE = BarkFineGenerationConfig
else:
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = F'{model_type}_small' if use_small else model_type
_SCREAMING_SNAKE_CASE = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(__lowerCamelCase ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info["""repo_id"""] , model_info["""file_name"""] )
_SCREAMING_SNAKE_CASE = torch.load(__lowerCamelCase , map_location=__lowerCamelCase )
# this is a hack
_SCREAMING_SNAKE_CASE = checkpoint["""model_args"""]
if "input_vocab_size" not in model_args:
_SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
_SCREAMING_SNAKE_CASE = model_args["""vocab_size"""]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_SCREAMING_SNAKE_CASE = model_args.pop("""n_head""" )
_SCREAMING_SNAKE_CASE = model_args.pop("""n_embd""" )
_SCREAMING_SNAKE_CASE = model_args.pop("""n_layer""" )
_SCREAMING_SNAKE_CASE = ConfigClass(**checkpoint["""model_args"""] )
_SCREAMING_SNAKE_CASE = ModelClass(config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = GenerationConfigClass()
_SCREAMING_SNAKE_CASE = model_generation_config
_SCREAMING_SNAKE_CASE = checkpoint["""model"""]
# fixup checkpoint
_SCREAMING_SNAKE_CASE = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(__lowerCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
_SCREAMING_SNAKE_CASE = k[len(__lowerCamelCase ) :]
for old_layer_name in new_layer_name_dict:
_SCREAMING_SNAKE_CASE = new_k.replace(__lowerCamelCase , new_layer_name_dict[old_layer_name] )
_SCREAMING_SNAKE_CASE = state_dict.pop(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = set(state_dict.keys() ) - set(model.state_dict().keys() )
_SCREAMING_SNAKE_CASE = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
_SCREAMING_SNAKE_CASE = set(model.state_dict().keys() ) - set(state_dict.keys() )
_SCREAMING_SNAKE_CASE = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(__lowerCamelCase ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(__lowerCamelCase ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(__lowerCamelCase , strict=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = model.num_parameters(exclude_embeddings=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = checkpoint["""best_val_loss"""].item()
logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(__lowerCamelCase , 3 )} loss' )
model.eval()
model.to(__lowerCamelCase )
del checkpoint, state_dict
return model
def lowerCamelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any=False , __lowerCamelCase : Union[str, Any]="text" ) ->Tuple:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_SCREAMING_SNAKE_CASE = """cpu""" # do conversion on cpu
_SCREAMING_SNAKE_CASE = _get_ckpt_path(__lowerCamelCase , use_small=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = _load_model(__lowerCamelCase , __lowerCamelCase , model_type=__lowerCamelCase , use_small=__lowerCamelCase )
# load bark initial model
_SCREAMING_SNAKE_CASE = _bark_load_model(__lowerCamelCase , """cpu""" , model_type=__lowerCamelCase , use_small=__lowerCamelCase )
if model_type == "text":
_SCREAMING_SNAKE_CASE = bark_model["""model"""]
if model.num_parameters(exclude_embeddings=__lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("""initial and new models don't have the same number of parameters""" )
# check if same output as the bark model
_SCREAMING_SNAKE_CASE = 5
_SCREAMING_SNAKE_CASE = 10
if model_type in ["text", "coarse"]:
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = bark_model(__lowerCamelCase )[0]
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
# take last logits
_SCREAMING_SNAKE_CASE = output_new_model_total.logits[:, [-1], :]
else:
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 8
_SCREAMING_SNAKE_CASE = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_SCREAMING_SNAKE_CASE = model(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = bark_model(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don't have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("""initial and new outputs are not equal""" )
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
model.save_pretrained(__lowerCamelCase )
def lowerCamelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , ) ->List[str]:
_SCREAMING_SNAKE_CASE = os.path.join(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = BarkSemanticConfig.from_pretrained(os.path.join(__lowerCamelCase , """config.json""" ) )
_SCREAMING_SNAKE_CASE = BarkCoarseConfig.from_pretrained(os.path.join(__lowerCamelCase , """config.json""" ) )
_SCREAMING_SNAKE_CASE = BarkFineConfig.from_pretrained(os.path.join(__lowerCamelCase , """config.json""" ) )
_SCREAMING_SNAKE_CASE = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
_SCREAMING_SNAKE_CASE = BarkSemanticModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = BarkCoarseModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = BarkFineModel.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
_SCREAMING_SNAKE_CASE = BarkConfig.from_sub_model_configs(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_SCREAMING_SNAKE_CASE = BarkModel(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = semantic
_SCREAMING_SNAKE_CASE = coarseAcoustic
_SCREAMING_SNAKE_CASE = fineAcoustic
_SCREAMING_SNAKE_CASE = codec
_SCREAMING_SNAKE_CASE = bark_generation_config
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
bark.save_pretrained(__lowerCamelCase , repo_id=__lowerCamelCase , push_to_hub=__lowerCamelCase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
lowercase_ = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
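# Example invocation (the script name is a placeholder for this file):
# python convert_bark_checkpoint.py text ./bark-text-converted --is_small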
| 58 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__UpperCAmelCase =True
except (ImportError, ModuleNotFoundError):
__UpperCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
UpperCamelCase__ = re.sub('''<n>''' , '''''' , UpperCamelCase__ ) # remove pegasus newline char (re.sub returns a new string, so the result must be kept)
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
| 67 | 0 |
'''simple docstring'''
import string
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Any = ''''''
for i in sequence:
UpperCAmelCase__ : List[Any] = ord(lowerCAmelCase__ )
if 65 <= extract <= 90:
output += chr(1_55 - extract )
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )
else:
output += i
return output
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : List[Any] = string.ascii_letters
UpperCAmelCase__ : Tuple = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
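# Atbash maps every letter to its mirror in the alphabet (A <-> Z, B <-> Y, ...),
# so the cipher is its own inverse: "Hello" encodes to "Svool", and encoding
# "Svool" again recovers "Hello"; non-letters pass through untouched.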
def a__ ( ) -> None:
from timeit import timeit
print('''Running performance benchmarks...''' )
UpperCAmelCase__ : Dict = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F"""> atbash_slow(): {timeit("atbash_slow(printable)" , setup=lowerCAmelCase__ )} seconds""" )
print(F"""> atbash(): {timeit("atbash(printable)" , setup=lowerCAmelCase__ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
| 299 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : List[Any] , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_A : List[str] , **_A : Tuple ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Tuple , *_A : Tuple , **_A : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_A : Any , **_A : int ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[Any] , *_A : List[Any] , **_A : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Dict , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : Any , **_A : Tuple ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : int , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_A : Optional[int] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Dict ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Union[str, Any] , *_A : Optional[int] , **_A : int ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class lowerCamelCase_ ( metaclass=__a ):
lowerCAmelCase__ = ['torch', 'transformers', 'onnx']
def __init__( self : Optional[Any] , *_A : Union[str, Any] , **_A : Dict ):
'''simple docstring'''
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : List[str] , *_A : str , **_A : List[str] ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase_ ( cls : Dict , *_A : str , **_A : Any ):
'''simple docstring'''
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 299 | 1 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
_UpperCamelCase : Any = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
_UpperCamelCase : str = parser.parse_args()
if args.check_lib:
_UpperCamelCase : List[Any] = importlib.import_module("transformers")
_UpperCamelCase : str = Path(transformers_module.__file__).parent
else:
_UpperCamelCase : List[str] = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
| 77 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
A__ = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def _lowerCAmelCase ( __lowerCAmelCase ) -> str:
"""simple docstring"""
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowerCAmelCase )
def _lowerCAmelCase ( __lowerCAmelCase ) -> List[Any]:
"""simple docstring"""
from transformers.testing_utils import pytest_terminal_summary_main
snake_case__ : Dict = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase )
| 230 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 369 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''openai/whisper-base'''
__lowerCAmelCase = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
__lowerCAmelCase = '''transcriber'''
__lowerCAmelCase = WhisperProcessor
__lowerCAmelCase = WhisperForConditionalGeneration
__lowerCAmelCase = ['''audio''']
__lowerCAmelCase = ['''text''']
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor(_UpperCAmelCase , return_tensors='''pt''' ).input_features
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.model.generate(inputs=_UpperCAmelCase )
def _lowerCamelCase ( self , _UpperCAmelCase ):
return self.pre_processor.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )[0]
| 188 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'audio': Audio()} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'transcription': Value('string' )} )
SCREAMING_SNAKE_CASE : str = "audio"
SCREAMING_SNAKE_CASE : str = "transcription"
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any] ):
if self.audio_column not in features:
raise ValueError(F"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column] ,lowercase__ ):
raise ValueError(F"Column {self.audio_column} is not an Audio type." )
__lowercase = copy.deepcopy(self )
__lowercase = self.input_schema.copy()
__lowercase = features[self.audio_column]
__lowercase = input_schema
return task_template
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 104 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
UpperCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
UpperCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
UpperCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : List[Any]) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> List[Any]:
"""simple docstring"""
import nltk
nltk.download('wordnet')
if NLTK_VERSION >= version.Version('3.6.5'):
nltk.download('punkt')
if NLTK_VERSION >= version.Version('3.6.6'):
nltk.download('omw-1.4')
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Optional[int] , A : List[Any]=0.9 , A : Optional[Any]=3 , A : Optional[int]=0.5) -> Any:
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5'):
_UpperCAmelCase = [
meteor_score.single_meteor_score(
word_tokenize(A) , word_tokenize(A) , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
else:
_UpperCAmelCase = [
meteor_score.single_meteor_score(A , A , alpha=A , beta=A , gamma=A)
for ref, pred in zip(A , A)
]
return {"meteor": np.mean(A)}
| 339 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTBigCodeForSequenceClassification',
'GPTBigCodeForTokenClassification',
'GPTBigCodeForCausalLM',
'GPTBigCodeModel',
'GPTBigCodePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :torch.FloatTensor
lowerCamelCase :torch.FloatTensor
class a ( __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :int = 1
@register_to_config
def __init__( self , lowerCAmelCase_ = 20_00 , lowerCAmelCase_ = 0.15 , lowerCAmelCase_ = 0.01 , lowerCAmelCase_ = 1348.0 , lowerCAmelCase_ = 1E-5 , lowerCAmelCase_ = 1 , ) -> Tuple:
# standard deviation of the initial noise distribution
_A = sigma_max
# setable values
_A = None
self.set_sigmas(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> torch.FloatTensor:
return sample
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Tuple:
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_A = torch.linspace(1 , lowerCAmelCase_ , lowerCAmelCase_ , device=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None ) -> Any:
_A = sigma_min if sigma_min is not None else self.config.sigma_min
_A = sigma_max if sigma_max is not None else self.config.sigma_max
_A = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowerCAmelCase_ , lowerCAmelCase_ )
_A = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_A = torch.exp(torch.linspace(math.log(lowerCAmelCase_ ) , math.log(lowerCAmelCase_ ) , lowerCAmelCase_ ) )
_A = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
_A = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
_A = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be on the same device, so we use cpu, as is the default with cuda
_A = timesteps.to(self.discrete_sigmas.device )
_A = self.discrete_sigmas[timesteps].to(sample.device )
_A = self.get_adjacent_sigma(lowerCAmelCase_ , lowerCAmelCase_ ).to(sample.device )
_A = torch.zeros_like(lowerCAmelCase_ )
_A = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_A = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
_A = diffusion.unsqueeze(-1 )
_A = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_A = randn_tensor(
sample.shape , layout=sample.layout , generator=lowerCAmelCase_ , device=sample.device , dtype=sample.dtype )
_A = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_A = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowerCAmelCase_ , prev_sample_mean=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper suggests replacing norm(z) with sqrt(d), where d is the dim. of z
# sample noise for correction
_A = randn_tensor(sample.shape , layout=sample.layout , generator=lowerCAmelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
_A = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
_A = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
_A = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_A = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_A = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
_A = step_size.unsqueeze(-1 )
_A = sample + step_size * model_output
_A = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
_A = timesteps.to(original_samples.device )
_A = self.discrete_sigmas.to(original_samples.device )[timesteps]
_A = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowerCAmelCase_ ) * sigmas[:, None, None, None]
)
_A = noise + original_samples
return noisy_samples
def __len__( self ) -> List[str]:
return self.config.num_train_timesteps
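# Scheduler notes (editorial): step_pred performs one reverse-SDE update,
# x_prev = x + g**2 * score + g * z (the VE SDE has zero deterministic drift),
# while step_correct runs a Langevin-style corrector whose step size is tuned
# from the signal-to-noise ratio; together they form the predictor-corrector
# sampler of Song et al., "Score-Based Generative Modeling through SDEs".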
| 81 | 1 |
def a__ ( A_, A_ ):
'''simple docstring'''
return base * power(A_, (exponent - 1) ) if exponent else 1
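# Intended behavior: power(2, 10) unwinds ten recursive multiplications and
# returns 1024; exponent 0 hits the base case and yields 1. Negative exponents
# are handled by the caller below via 1 / result.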
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
__lowerCAmelCase : int = int(input('Enter the base: ').strip())
__lowerCAmelCase : Optional[int] = int(input('Enter the exponent: ').strip())
__lowerCAmelCase : Dict = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
__lowerCAmelCase : Union[str, Any] = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : List[str] = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
__lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
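# Hedged note on the `_LazyModule` indirection above: importing the package stays
# cheap, and the heavy torch/TF submodules load on first attribute access, e.g.
# import transformers.models.xlm as xlm  # nothing heavy imported yet
# model_cls = xlm.XLMModel               # resolved lazily via _import_structure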
| 88 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 357 |
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(F'Current {symbol:<4} stock price is {stock_price(symbol):>8}')
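# Hedged note: the CSS class queried above is tied to Yahoo Finance's markup at the
# time of writing and can break silently. A library such as yfinance (an assumption,
# not used in the original file) is a sturdier option:
# import yfinance
# print(yfinance.Ticker("AAPL").fast_info["last_price"])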
| 10 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _a ( unittest.TestCase ):
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = self.dummy_uncond_unet
UpperCAmelCase = KarrasVeScheduler()
UpperCAmelCase = KarrasVePipeline(unet=lowercase , scheduler=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(num_inference_steps=2 , generator=lowercase , output_type='''numpy''' ).images
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(num_inference_steps=2 , generator=lowercase , output_type='''numpy''' , return_dict=lowercase )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class _a ( unittest.TestCase ):
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = '''google/ncsnpp-celebahq-256'''
UpperCAmelCase = UNetaDModel.from_pretrained(lowercase )
UpperCAmelCase = KarrasVeScheduler()
UpperCAmelCase = KarrasVePipeline(unet=lowercase , scheduler=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = pipe(num_inference_steps=20 , generator=lowercase , output_type='''numpy''' ).images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
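# Hedged note: comparing a fixed 3x3 corner slice of the output against hard-coded
# values, as above, is the cheap determinism check used throughout these pipeline
# tests; it relies on `enable_full_determinism()` plus explicitly seeded generators.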
| 34 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( __a ):
__a : int = ["""image_processor""", """tokenizer"""]
__a : Union[str, Any] = """ChineseCLIPImageProcessor"""
__a : List[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Dict , lowercase : Union[str, Any]=None , lowercase : Dict=None , **lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase , )
UpperCAmelCase = kwargs.pop('''feature_extractor''' )
UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowercase , lowercase )
UpperCAmelCase = self.image_processor
def __call__( self : Tuple , lowercase : Optional[Any]=None , lowercase : Union[str, Any]=None , lowercase : int=None , **lowercase : Dict ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
UpperCAmelCase = self.tokenizer(lowercase , return_tensors=lowercase , **lowercase )
if images is not None:
UpperCAmelCase = self.image_processor(lowercase , return_tensors=lowercase , **lowercase )
if text is not None and images is not None:
UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowercase ) , tensor_type=lowercase )
def A ( self : int , *lowercase : Tuple , **lowercase : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A ( self : Optional[Any] , *lowercase : int , **lowercase : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase = self.tokenizer.model_input_names
UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase , )
return self.image_processor_class
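# Hedged usage sketch (the checkpoint id and `image` variable are assumptions):
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text=["一只猫"], images=image, return_tensors="pt", padding=True)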
| 34 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
def __init__( self : str , *a : Dict , **a : int ):
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase ) | 360 |
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data) , 2 ) )
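# Hedged round-trip check for the pair above (illustrative doctest-style comments):
# >>> base16_encode(b"Hello World!")
# '48656C6C6F20576F726C6421'
# >>> base16_decode(base16_encode(b"Hello World!"))
# b'Hello World!'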
if __name__ == "__main__":
import doctest
doctest.testmod() | 307 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    '''simple docstring'''
    state_dict = torch.load(checkpoint_path, map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
__A =parser.parse_args()
__A =convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
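# Hedged CLI example (script name and paths are placeholders, not from the original file):
# python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-hf \
#     --hf_config facebook/mbart-large-cc25 --finetuned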
| 226 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCAmelCase__ :
'''simple docstring'''
UpperCamelCase = None
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__UpperCAmelCase : Optional[int] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , a_ )
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = os.path.join(a_ , '''feat_extract.json''' )
feat_extract_first.to_json_file(a_ )
__UpperCAmelCase : Any = self.feature_extraction_class.from_json_file(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : List[str] = feat_extract_first.save_pretrained(a_ )[0]
check_json_file_has_correct_format(a_ )
__UpperCAmelCase : Optional[Any] = self.feature_extraction_class.from_pretrained(a_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def snake_case__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : int = self.feature_extraction_class()
self.assertIsNotNone(a_ )
| 226 | 1 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE__ :
def __init__(self : List[str] , a__ : Tuple , ):
"""simple docstring"""
__snake_case = parent
__snake_case = 13
__snake_case = 7
__snake_case = 30
__snake_case = self.seq_length + self.mem_len
__snake_case = 15
__snake_case = True
__snake_case = True
__snake_case = 99
__snake_case = [10, 50, 80]
__snake_case = 32
__snake_case = 32
__snake_case = 4
__snake_case = 8
__snake_case = 128
__snake_case = 2
__snake_case = 2
__snake_case = None
__snake_case = 1
__snake_case = 0
__snake_case = 3
__snake_case = self.vocab_size - 1
__snake_case = 0.0_1
def a (self : Dict ):
"""simple docstring"""
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def a (self : List[Any] ):
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def a (self : List[Any] , a__ : List[Any] , a__ : Tuple , a__ : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case = TFTransfoXLModel(a__ )
__snake_case , __snake_case = model(a__ ).to_tuple()
__snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a}
__snake_case , __snake_case = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def a (self : List[Any] , a__ : List[Any] , a__ : int , a__ : Dict , a__ : int ):
"""simple docstring"""
__snake_case = TFTransfoXLLMHeadModel(a__ )
__snake_case , __snake_case = model(a__ ).to_tuple()
__snake_case = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
__snake_case , __snake_case = model(a__ ).to_tuple()
__snake_case , __snake_case = model([input_ids_a, mems_a] ).to_tuple()
__snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
__snake_case , __snake_case = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def a (self : Dict , a__ : List[str] , a__ : Optional[Any] , a__ : str , a__ : Tuple ):
"""simple docstring"""
__snake_case = TFTransfoXLForSequenceClassification(a__ )
__snake_case = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) = config_and_inputs
__snake_case = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Optional[Any] = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
A_ : Tuple = () if is_tf_available() else ()
A_ : Tuple = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
A_ : List[str] = False
A_ : Optional[int] = False
A_ : Optional[int] = False
A_ : Optional[Any] = False
def a (self : Optional[Any] , a__ : Any , a__ : List[str] , a__ : List[str] , a__ : str , a__ : Union[str, Any] ):
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def a (self : Dict ):
"""simple docstring"""
__snake_case = TFTransfoXLModelTester(self )
__snake_case = ConfigTester(self , config_class=a__ , d_embed=37 )
def a (self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def a (self : Union[str, Any] ):
"""simple docstring"""
self.model_tester.set_seed()
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def a (self : str ):
"""simple docstring"""
self.model_tester.set_seed()
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def a (self : Any ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
__snake_case = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
__snake_case = model.get_bias()
assert name is None
else:
__snake_case = model.get_output_embeddings()
assert x is None
__snake_case = model.get_bias()
assert name is None
def a (self : Any ):
"""simple docstring"""
pass
@slow
def a (self : Dict ):
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def a (self : Tuple ):
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def a (self : Dict ):
"""simple docstring"""
__snake_case = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
__snake_case = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
__snake_case = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
__snake_case = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
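# Hedged note: the repeated `mems` round-trips in the checks above exercise
# Transformer-XL's segment-level recurrence; each forward pass returns updated
# memory tensors of shape (mem_len, batch_size, hidden_size) that are fed back
# in on the next call.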
| 355 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A_ : Optional[int] = CycleDiffusionPipeline
A_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
A_ : List[str] = PipelineTesterMixin.required_optional_params - {'latents'}
A_ : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
A_ : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS
A_ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS
def a (self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__snake_case = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__snake_case = CLIPTextModel(a__ )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a (self : List[str] , a__ : Tuple , a__ : Optional[Any]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
__snake_case = image / 2 + 0.5
if str(a__ ).startswith('''mps''' ):
__snake_case = torch.manual_seed(a__ )
else:
__snake_case = torch.Generator(device=a__ ).manual_seed(a__ )
__snake_case = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def a (self : str ):
"""simple docstring"""
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = CycleDiffusionPipeline(**a__ )
__snake_case = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = pipe(**a__ )
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__snake_case = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.get_dummy_components()
for name, module in components.items():
if hasattr(a__ , '''half''' ):
__snake_case = module.half()
__snake_case = CycleDiffusionPipeline(**a__ )
__snake_case = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
__snake_case = self.get_dummy_inputs(a__ )
__snake_case = pipe(**a__ )
__snake_case = output.images
__snake_case = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
__snake_case = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def a (self : Any ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def a (self : Any ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def a (self : Any ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a (self : str ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def a (self : Dict ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a (self : Tuple ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
__snake_case = init_image.resize((512, 512) )
__snake_case = '''CompVis/stable-diffusion-v1-4'''
__snake_case = DDIMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
__snake_case = CycleDiffusionPipeline.from_pretrained(
a__ , scheduler=a__ , safety_checker=a__ , torch_dtype=torch.floataa , revision='''fp16''' )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__snake_case = '''A black colored car'''
__snake_case = '''A blue colored car'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a__ , source_prompt=a__ , image=a__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def a (self : Tuple ):
"""simple docstring"""
__snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
__snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
__snake_case = init_image.resize((512, 512) )
__snake_case = '''CompVis/stable-diffusion-v1-4'''
__snake_case = DDIMScheduler.from_pretrained(a__ , subfolder='''scheduler''' )
__snake_case = CycleDiffusionPipeline.from_pretrained(a__ , scheduler=a__ , safety_checker=a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
pipe.enable_attention_slicing()
__snake_case = '''A black colored car'''
__snake_case = '''A blue colored car'''
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a__ , source_prompt=a__ , image=a__ , num_inference_steps=100 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=a__ , output_type='''np''' , )
__snake_case = output.images
assert np.abs(image - expected_image ).max() < 2E-2
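# Hedged note: CycleDiffusion edits a real image by inverting it under
# `source_prompt` and re-generating under `prompt`; `strength` and the two
# guidance scales passed above control how far the edit departs from the input.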
| 238 | 0 |
'''simple docstring'''
class __snake_case :
"""simple docstring"""
def __init__( self : int , lowerCamelCase : int , lowerCamelCase : int=None , lowerCamelCase : int=None ) -> str:
lowerCAmelCase_ : str = data
lowerCAmelCase_ : Optional[Any] = previous
lowerCAmelCase_ : int = next_node
def __str__( self : Any ) -> str:
return F'{self.data}'
def __lowercase ( self : Optional[Any] ) -> int:
return self.data
def __lowercase ( self : str ) -> List[str]:
return self.next
def __lowercase ( self : int ) -> Optional[int]:
return self.previous
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = head
def __iter__( self : str ) -> Optional[Any]:
return self
def __lowercase ( self : Union[str, Any] ) -> Dict:
if not self.current:
raise StopIteration
else:
lowerCAmelCase_ : Dict = self.current.get_data()
lowerCAmelCase_ : Tuple = self.current.get_next()
return value
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Any:
lowerCAmelCase_ : Optional[Any] = None # First node in list
lowerCAmelCase_ : Optional[Any] = None # Last node in list
def __str__( self : Optional[int] ) -> Dict:
lowerCAmelCase_ : str = self.head
lowerCAmelCase_ : Tuple = []
while current is not None:
nodes.append(current.get_data() )
lowerCAmelCase_ : str = current.get_next()
return " ".join(str(lowerCamelCase ) for node in nodes )
def __contains__( self : List[Any] , lowerCamelCase : int ) -> List[str]:
lowerCAmelCase_ : List[str] = self.head
while current:
if current.get_data() == value:
return True
lowerCAmelCase_ : List[Any] = current.get_next()
return False
def __iter__( self : str ) -> Optional[Any]:
return LinkedListIterator(self.head )
def __lowercase ( self : Dict ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def __lowercase ( self : List[str] ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def __lowercase ( self : Optional[Any] , lowerCamelCase : Node ) -> None:
if self.head is None:
lowerCAmelCase_ : Union[str, Any] = node
lowerCAmelCase_ : List[str] = node
else:
self.insert_before_node(self.head , lowerCamelCase )
def __lowercase ( self : Tuple , lowerCamelCase : Node ) -> None:
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.insert_after_node(self.tail , lowerCamelCase )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : int = Node(lowerCamelCase )
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.set_tail(lowerCamelCase )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Node , lowerCamelCase : Node ) -> None:
lowerCAmelCase_ : Optional[int] = node
lowerCAmelCase_ : List[Any] = node.previous
if node.get_previous() is None:
lowerCAmelCase_ : Tuple = node_to_insert
else:
lowerCAmelCase_ : Dict = node_to_insert
lowerCAmelCase_ : Optional[int] = node_to_insert
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Node , lowerCamelCase : Node ) -> None:
lowerCAmelCase_ : Optional[int] = node
lowerCAmelCase_ : Tuple = node.next
if node.get_next() is None:
lowerCAmelCase_ : Tuple = node_to_insert
else:
lowerCAmelCase_ : Tuple = node_to_insert
lowerCAmelCase_ : Optional[Any] = node_to_insert
def __lowercase ( self : Dict , lowerCamelCase : int , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Tuple = Node(lowerCamelCase )
lowerCAmelCase_ : List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase , lowerCamelCase )
return
current_position += 1
lowerCAmelCase_ : str = node.next
self.insert_after_node(self.tail , lowerCamelCase )
def __lowercase ( self : int , lowerCamelCase : int ) -> Node:
lowerCAmelCase_ : List[Any] = self.head
while node:
if node.get_data() == item:
return node
lowerCAmelCase_ : List[Any] = node.get_next()
raise Exception("""Node not found""" )
def __lowercase ( self : str , lowerCamelCase : str ) -> int:
if (node := self.get_node(lowerCamelCase )) is not None:
if node == self.head:
lowerCAmelCase_ : Any = self.head.get_next()
if node == self.tail:
lowerCAmelCase_ : Optional[int] = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase )
@staticmethod
def __lowercase ( lowerCamelCase : Node ) -> None:
if node.get_next():
lowerCAmelCase_ : Tuple = node.previous
if node.get_previous():
lowerCAmelCase_ : Any = node.next
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Any = None
def __lowercase ( self : str ) -> Optional[Any]:
return self.head is None
def UpperCamelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
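# Hedged, self-contained sketch of the pointer surgery `insert_before_node`
# performs above, written with plain attribute names for readability (the names
# are illustrative, not the mangled ones used in the class):
# node_to_insert.next = node
# node_to_insert.previous = node.previous
# if node.previous is None:
#     head = node_to_insert
# else:
#     node.previous.next = node_to_insert
# node.previous = node_to_insert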
| 120 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'gpt_neox'
def __init__( self : Optional[int] , lowerCamelCase : Tuple=5_04_32 , lowerCamelCase : Optional[int]=61_44 , lowerCamelCase : Tuple=44 , lowerCamelCase : Any=64 , lowerCamelCase : List[Any]=2_45_76 , lowerCamelCase : List[Any]="gelu" , lowerCamelCase : Optional[Any]=0.25 , lowerCamelCase : Any=1_00_00 , lowerCamelCase : Any=0.0 , lowerCamelCase : str=0.0 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[Any]=20_48 , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : Any=1E-5 , lowerCamelCase : Dict=True , lowerCamelCase : Optional[int]=0 , lowerCamelCase : List[str]=2 , lowerCamelCase : Dict=False , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[int]=None , **lowerCamelCase : int , ) -> Optional[Any]:
super().__init__(bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Optional[int] = num_attention_heads
lowerCAmelCase_ : str = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : List[Any] = rotary_pct
lowerCAmelCase_ : Any = rotary_emb_base
lowerCAmelCase_ : List[str] = attention_dropout
lowerCAmelCase_ : Union[str, Any] = hidden_dropout
lowerCAmelCase_ : Tuple = classifier_dropout
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : Any = layer_norm_eps
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : str = tie_word_embeddings
lowerCAmelCase_ : str = use_parallel_residual
lowerCAmelCase_ : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def __lowercase ( self : List[str] ) -> List[str]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCamelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F'got {self.rope_scaling}' )
lowerCAmelCase_ : Optional[Any] = self.rope_scaling.get("""type""" , lowerCamelCase )
lowerCAmelCase_ : int = self.rope_scaling.get("""factor""" , lowerCamelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' )
if rope_scaling_factor is None or not isinstance(lowerCamelCase , lowerCamelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
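# Hedged example exercising `_rope_scaling_validation` above (assuming this class
# corresponds to upstream transformers' GPTNeoXConfig, per model_type "gpt_neox"):
# GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})  # valid
# GPTNeoXConfig(rope_scaling={"type": "other", "factor": 2.0})   # raises ValueError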
| 120 | 1 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
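# Hedged usage note: solution(0.1) returns the side length of the number spiral
# at which primes first make up less than 10% of the diagonal values
# (Project Euler 58); with the trial division above, expect a few seconds, e.g.
# print(solution())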
| 362 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a__ ( nn.Module ):
def __init__( self , _A , _A , _A , _A=0.0 , _A = None , _A = "geglu" , _A = None , _A = False , _A = False , _A = False , _A = False , _A = True , _A = "layer_norm" , _A = False , ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = only_cross_attention
__lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
__lowerCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__lowerCAmelCase = AdaLayerNorm(_A , _A )
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase = AdaLayerNormZero(_A , _A )
else:
__lowerCAmelCase = nn.LayerNorm(_A , elementwise_affine=_A )
__lowerCAmelCase = Attention(
query_dim=_A , heads=_A , dim_head=_A , dropout=_A , bias=_A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=_A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__lowerCAmelCase = (
AdaLayerNorm(_A , _A )
if self.use_ada_layer_norm
else nn.LayerNorm(_A , elementwise_affine=_A )
)
__lowerCAmelCase = Attention(
query_dim=_A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=_A , dim_head=_A , dropout=_A , bias=_A , upcast_attention=_A , ) # is self-attn if encoder_hidden_states is none
else:
__lowerCAmelCase = None
__lowerCAmelCase = None
# 3. Feed-forward
__lowerCAmelCase = nn.LayerNorm(_A , elementwise_affine=_A )
__lowerCAmelCase = FeedForward(_A , dropout=_A , activation_fn=_A , final_dropout=_A )
# let chunk size default to None
__lowerCAmelCase = None
__lowerCAmelCase = 0
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = chunk_size
__lowerCAmelCase = dim
def __SCREAMING_SNAKE_CASE( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , _A = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
__lowerCAmelCase = self.norma(_A , _A )
elif self.use_ada_layer_norm_zero:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self.norma(
_A , _A , _A , hidden_dtype=hidden_states.dtype )
else:
__lowerCAmelCase = self.norma(_A )
__lowerCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__lowerCAmelCase = self.attna(
_A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=_A , **_A , )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
__lowerCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__lowerCAmelCase = (
self.norma(_A , _A ) if self.use_ada_layer_norm else self.norma(_A )
)
__lowerCAmelCase = self.attna(
_A , encoder_hidden_states=_A , attention_mask=_A , **_A , )
__lowerCAmelCase = attn_output + hidden_states
# 3. Feed-forward
__lowerCAmelCase = self.norma(_A )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
__lowerCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__lowerCAmelCase = torch.cat(
[self.ff(_A ) for hid_slice in norm_hidden_states.chunk(_A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__lowerCAmelCase = self.ff(_A )
if self.use_ada_layer_norm_zero:
__lowerCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
__lowerCAmelCase = ff_output + hidden_states
return hidden_states
class a__ ( nn.Module ):
def __init__( self , _A , _A = None , _A = 4 , _A = 0.0 , _A = "geglu" , _A = False , ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = int(dim * mult )
__lowerCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__lowerCAmelCase = GELU(_A , _A )
if activation_fn == "gelu-approximate":
__lowerCAmelCase = GELU(_A , _A , approximate="tanh" )
elif activation_fn == "geglu":
__lowerCAmelCase = GEGLU(_A , _A )
elif activation_fn == "geglu-approximate":
__lowerCAmelCase = ApproximateGELU(_A , _A )
__lowerCAmelCase = nn.ModuleList([] )
# project in
self.net.append(_A )
# project dropout
self.net.append(nn.Dropout(_A ) )
# project out
self.net.append(nn.Linear(_A , _A ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(_A ) )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
for module in self.net:
__lowerCAmelCase = module(_A )
return hidden_states
class a__ ( nn.Module ):
def __init__( self , _A , _A , _A = "none" ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = nn.Linear(_A , _A )
__lowerCAmelCase = approximate
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_A , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.proj(_A )
__lowerCAmelCase = self.gelu(_A )
return hidden_states
class a__ ( nn.Module ):
def __init__( self , _A , _A ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = nn.Linear(_A , dim_out * 2 )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(_A )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase = self.proj(_A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(_A )
class a__ ( nn.Module ):
def __init__( self , _A , _A ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = nn.Linear(_A , _A )
def __SCREAMING_SNAKE_CASE( self , _A ):
"""simple docstring"""
__lowerCAmelCase = self.proj(_A )
return x * torch.sigmoid(1.7_02 * x )
class a__ ( nn.Module ):
def __init__( self , _A , _A ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = nn.Embedding(_A , _A )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Linear(_A , embedding_dim * 2 )
__lowerCAmelCase = nn.LayerNorm(_A , elementwise_affine=_A )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
__lowerCAmelCase = self.linear(self.silu(self.emb(_A ) ) )
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(_A , 2 )
__lowerCAmelCase = self.norm(_A ) * (1 + scale) + shift
return x
class a__ ( nn.Module ):
def __init__( self , _A , _A ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = CombinedTimestepLabelEmbeddings(_A , _A )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Linear(_A , 6 * embedding_dim , bias=_A )
__lowerCAmelCase = nn.LayerNorm(_A , elementwise_affine=_A , eps=1E-6 )
def __SCREAMING_SNAKE_CASE( self , _A , _A , _A , _A=None ):
"""simple docstring"""
__lowerCAmelCase = self.linear(self.silu(self.emb(_A , _A , hidden_dtype=_A ) ) )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = emb.chunk(6 , dim=1 )
__lowerCAmelCase = self.norm(_A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a__ ( nn.Module ):
def __init__( self , _A , _A , _A , _A = None , _A = 1E-5 ):
"""simple docstring"""
super().__init__()
__lowerCAmelCase = num_groups
__lowerCAmelCase = eps
if act_fn is None:
__lowerCAmelCase = None
else:
__lowerCAmelCase = get_activation(_A )
__lowerCAmelCase = nn.Linear(_A , out_dim * 2 )
def __SCREAMING_SNAKE_CASE( self , _A , _A ):
"""simple docstring"""
if self.act:
__lowerCAmelCase = self.act(_A )
__lowerCAmelCase = self.linear(_A )
__lowerCAmelCase = emb[:, :, None, None]
__lowerCAmelCase , __lowerCAmelCase = emb.chunk(2 , dim=1 )
__lowerCAmelCase = F.group_norm(_A , self.num_groups , eps=self.eps )
__lowerCAmelCase = x * (1 + scale) + shift
return x
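# Hedged note: `set_chunk_feed_forward` above splits the feed-forward input into
# `shape[dim] // chunk_size` slices to trade memory for compute; the error message
# in the forward pass names the user-facing entry point, e.g.
# unet.enable_forward_chunking(chunk_size=2)  # assumption: the UNet wires this through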
| 102 | 0 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
lowercase__ : Tuple = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = '''{val_avg_rouge2:.4f}-{step_count}'''
    elif metric == "bleu":
        exp = '''{val_avg_bleu:.4f}-{step_count}'''
    elif metric == "em":
        exp = '''{val_avg_em:.4f}-{step_count}'''
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ''' function.''' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=F"""val_{metric}""", mode='''max''', save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=F"""val_{metric}""", mode='''min''' if '''loss''' in metric else '''max''', patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lrs = {f"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True) -> None:
        '''simple docstring'''
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / '''test_results.txt'''
            generations_file = od / '''test_generations.txt'''
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , '''a+''' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '''\n'''.join(metrics['''preds'''] )
            generations_file.open('''w+''' ).write(content )
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        '''simple docstring'''
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , '''test''' )
    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        '''simple docstring'''
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
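# Hedged wiring example (an addition; `output_dir` and the trainer arguments are
# placeholders, not from the original file):
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback(output_dir, metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ],
# )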
| 324 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file' , default=None , help='The config file to use for the default values in the launching script.')
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()]))
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config , dict)
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)
    info['`Accelerate` configs'] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
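# Usage sketch (assumes this parser is wired into the `accelerate` CLI, as upstream does):
#
#   $ accelerate env
#   $ accelerate env --config_file /path/to/default_config.yaml
#
# Both commands print the report assembled by `env_command` above.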
| 137 | 0 |
'''simple docstring'''
def rank_of_matrix(matrix: list[list[float]]) -> int:
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                # Move the discarded column out of the active block
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row (the while loop replaces the original,
            # ineffective `row -= 1` inside a for loop)
    return rank
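# Worked example (sketch; note that `rank_of_matrix` mutates its argument in place):
#
#   >>> rank_of_matrix([[1.0, 2.0], [2.0, 4.0]])  # second row is a multiple of the first
#   1
#   >>> rank_of_matrix([[1.0, 0.0], [0.0, 1.0]])  # identity matrix
#   2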
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
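# Usage sketch (illustrative only; downloads the published checkpoint on first use):
#
#   tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/barthez')
#   ids = tokenizer('Transformers est génial !')['input_ids']
#   print(tokenizer.decode(ids))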
| 170 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_luke'] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189 |
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
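# Worked example (sketch): the first six Catalan numbers.
#
#   >>> catalan_numbers(5)
#   [1, 1, 2, 5, 14, 42]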
if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')

    import doctest
    doctest.testmod()
| 97 | 0 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
A: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , unittest.TestCase ):
__lowerCAmelCase : Any = MaMaaaTokenizer
__lowerCAmelCase : Any = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : Optional[Any] = True
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
UpperCAmelCase : int = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
UpperCAmelCase : Optional[int] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : str = Path(self.tmpdirname )
save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
UpperCAmelCase : Union[str, Any] = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = """</s>"""
UpperCAmelCase : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : int = self.get_tokenizer()
UpperCAmelCase : Dict = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<s>""" )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("""Skip this test while all models are still to be uploaded.""" )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Optional[int] = self.get_tokenizer()
UpperCAmelCase : str = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [2, 3, 4, 5, 6] , )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
UpperCAmelCase : Dict = tokenizer.convert_tokens_to_string(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , """This is a test""" )
@slow
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase : Any = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name="""facebook/m2m100_418M""" , revision="""c168bae485c864188cf9aa0e4108b0b6934dc91e""" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
__lowerCAmelCase : Tuple = 'facebook/m2m100_418M'
__lowerCAmelCase : List[Any] = [
'In my opinion, there are two levels of response from the French government.',
'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
]
__lowerCAmelCase : Optional[Any] = [
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
]
# fmt: off
__lowerCAmelCase : Union[str, Any] = [EN_CODE, 593, 1_949, 115_781, 4, 71_586, 4_234, 60_633, 126_233, 432, 123_808, 15_592, 1_197, 117_132, 120_618, 5, 2]
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en""" , tgt_lang="""fr""" )
UpperCAmelCase : Optional[int] = 1
return cls
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.get_lang_id("""ar""" ) , 128006 )
self.assertEqual(self.tokenizer.get_lang_id("""en""" ) , 128022 )
self.assertEqual(self.tokenizer.get_lang_id("""ro""" ) , 128076 )
self.assertEqual(self.tokenizer.get_lang_id("""mr""" ) , 128063 )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : str = self.tokenizer.get_vocab()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["""<unk>"""] , 3 )
self.assertIn(self.tokenizer.get_lang_token("""en""" ) , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Any = """en"""
UpperCAmelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(_SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids )
# fmt: off
UpperCAmelCase : List[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase : List[str] = self.tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertNotIn(self.tokenizer.eos_token , _SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : Union[str, Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = MaMaaaTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(new_tok.lang_token_to_id , _SCREAMING_SNAKE_CASE )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : List[Any] = """en"""
UpperCAmelCase : Dict = """fr"""
UpperCAmelCase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" )
UpperCAmelCase : Tuple = shift_tokens_right(
batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
UpperCAmelCase : int = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = """mr"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
UpperCAmelCase : int = """zh"""
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = """mr"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""mr""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
UpperCAmelCase : Union[str, Any] = """zh"""
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("""zh""" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Dict = self.tokenizer._build_translation_inputs("""A test""" , return_tensors="""pt""" , src_lang="""en""" , tgt_lang="""ar""" )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE ) , {
# en_XX, A, test, EOS
"""input_ids""": [[128022, 58, 4183, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 128006,
} , )
| 76 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
UpperCAmelCase : Optional[int] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
UpperCAmelCase : Tuple = bertabert.config.encoder.vocab_size
UpperCAmelCase : int = tokenizer.sep_token_id
UpperCAmelCase : Dict = tokenizer.cls_token_id
UpperCAmelCase : int = 128
UpperCAmelCase : List[str] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
UpperCAmelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
UpperCAmelCase : Optional[int] = train_dataset.select(range(32 ) )
UpperCAmelCase : int = val_dataset.select(range(16 ) )
UpperCAmelCase : List[str] = 4
def _map_to_encoder_decoder_inputs(_SCREAMING_SNAKE_CASE ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCAmelCase : str = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
UpperCAmelCase : str = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=128 )
UpperCAmelCase : Optional[Any] = inputs.input_ids
UpperCAmelCase : Union[str, Any] = inputs.attention_mask
UpperCAmelCase : Union[str, Any] = outputs.input_ids
UpperCAmelCase : Any = outputs.input_ids.copy()
UpperCAmelCase : Tuple = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
]
UpperCAmelCase : List[Any] = outputs.attention_mask
assert all(len(_SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(_SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = pred.label_ids
UpperCAmelCase : Tuple = pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase : List[Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
UpperCAmelCase : List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
UpperCAmelCase : Dict = self.get_auto_remove_tmp_dir()
        UpperCAmelCase : Dict = Seq2SeqTrainingArguments(
output_dir=_SCREAMING_SNAKE_CASE , per_device_train_batch_size=_SCREAMING_SNAKE_CASE , per_device_eval_batch_size=_SCREAMING_SNAKE_CASE , predict_with_generate=_SCREAMING_SNAKE_CASE , evaluation_strategy="""steps""" , do_train=_SCREAMING_SNAKE_CASE , do_eval=_SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
        UpperCAmelCase : List[str] = Seq2SeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
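        # Follow-up sketch (hypothetical; not part of the original test): once training
        # finishes, the same trainer can report metrics on the eval set:
        #   eval_metrics = trainer.evaluate()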
| 76 | 1 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    compute_environment = _ask_options(
        'In which compute environment are you running?', ['This machine', 'AWS (Amazon SageMaker)'], _convert_compute_environment)
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ))
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
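# Usage sketch (assumes the standard `accelerate` CLI entry point):
#
#   $ accelerate config                                 # interactive prompts, default save path
#   $ accelerate config --config_file ./my_config.yaml  # answers written to a custom file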
| 219 |
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling=1_000_000):
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
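# Sanity-check sketch: below 100, the longest run of consecutive primes summing to a
# prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so `solution(100)` should return 41.
#
#   >>> solution(100)
#   41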
if __name__ == "__main__":
print(f"{solution() = }")
| 166 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += 'X'
    clean += dirty[-1]
    if len(clean) & 1:
        clean += 'X'
    return clean


def generate_table(key: str) -> list[str]:
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
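# Round-trip sketch (illustrative key and message; the trailing 'X' is the padding
# added by `prepare_input` to make the message length even):
#
#   >>> decode(encode('Hide the gold', 'playfair example'), 'playfair example')
#   'HIDETHEGOLDX'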
| 363 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add model', use_temp_dir=True)
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message='Add image processor', use_temp_dir=True)
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type='bottleneck'),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
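# Usage sketch (the script file name is a placeholder; run from a checkout that
# contains this converter):
#
#   $ python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
#
# Omitting --model_name converts every architecture listed in `names_to_config`.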
| 95 | 0 |
def upper(word: str) -> str:
    return ''.join(chr(ord(char) - 32) if 'a' <= char <= 'z' else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 11 |
def method_1(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f'y = {y}')
if __name__ == "__main__":
main()
| 11 | 1 |
"""simple docstring"""
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
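# Known test vector (sketch):
#
#   >>> adler32('Wikipedia')
#   300286872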
| 209 |
"""simple docstring"""
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError('only integers accepted as input')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int(''.join(list(transposition))) for transposition in num_transpositions)
if __name__ == "__main__":
__import__('doctest').testmod()
| 209 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase : List[Any] =StableDiffusionInstructPixaPixPipeline
lowercase : List[Any] =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowercase : Optional[Any] =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase : Union[str, Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
lowercase : List[Any] =IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
lowerCamelCase_ =PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
lowerCamelCase_ =CLIPTextModel(lowerCAmelCase )
lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ ={
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowerCamelCase_ =image.cpu().permute(0, 2, 3, 1 )[0]
        lowerCamelCase_ =Image.fromarray(np.uint8(lowerCAmelCase ) ).convert('''RGB''' )
if str(lowerCAmelCase ).startswith('''mps''' ):
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
else:
lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowerCamelCase_ ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''image_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ ='''french fries'''
lowerCamelCase_ =sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase )
lowerCamelCase_ =output.images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =[inputs['''prompt''']] * 2
        lowerCamelCase_ =np.array(inputs['''image'''] ).astype(np.float32 ) / 255.0
lowerCamelCase_ =torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase )
lowerCamelCase_ =image / 2 + 0.5
lowerCamelCase_ =image.permute(0, 3, 1, 2 )
lowerCamelCase_ =image.repeat(2, 1, 1, 1 )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowerCamelCase_ =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ ='''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''' )
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase )
lowerCamelCase_ =sd_pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
lowerCamelCase_ =[round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()]
print(''','''.join([str(lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowerCamelCase_ =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowerCamelCase_ =VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowerCamelCase_ =pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' ) )[0]
lowerCamelCase_ =components['''vae''']
lowerCamelCase_ =self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='''pt''' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCamelCase_ =vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCamelCase_ =pipe(**lowerCAmelCase )[0]
lowerCamelCase_ =np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase, 1e-4, '''passing latents as image input generate different result from passing image''' )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =torch.manual_seed(lowerCAmelCase )
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
lowerCamelCase_ ={
'''prompt''': '''turn him into a cyborg''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''image_guidance_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
'''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase )
lowerCamelCase_ =DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =0
def callback_fn(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ) -> None:
lowerCamelCase_ =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCamelCase_ =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowerCamelCase_ =latents[0, -3:, -3:, -1]
lowerCamelCase_ =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCamelCase_ =False
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.float16 )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =self.get_inputs()
pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''', safety_checker=lowerCAmelCase, torch_dtype=torch.float16 )
lowerCamelCase_ =pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ =self.get_inputs()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ =inputs['''image'''].resize((504, 504) )
lowerCamelCase_ ='''timbrooks/instruct-pix2pix'''
lowerCamelCase_ =StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase, safety_checker=lowerCAmelCase, )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowerCamelCase_ =pipe(**lowerCAmelCase )
lowerCamelCase_ =output.images[0]
lowerCamelCase_ =image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowerCamelCase_ =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 75 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 232 |
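# Quick sketch of the defaults in the config above: LXMERT stacks 9 language
# layers, 5 cross-modality layers and 5 visual layers.
config = LxmertConfig()
assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}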
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""http://www.cs.umd.edu/~snover/tercom/""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#ter"""] , reference_urls=[
"""https://github.com/jhclark/tercom""",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 232 | 1 |
"""simple docstring"""
def lowercase ( ) ->List[Any]:
"""simple docstring"""
__snake_case : int = 0
for i in range(1 , 1_001 ):
total += i**i
return str(_snake_case )[-10:]
if __name__ == "__main__":
print(solution())
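# Equivalent check (a sketch, not part of the original solution): exponentiate
# modulo 10**10 so intermediate values never grow beyond ten digits.
MOD = 10**10
assert str(sum(pow(i, i, MOD) for i in range(1, 1_001)) % MOD) == solution()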
| 102 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase :
'''simple docstring'''
def __init__(self , a_ , a_=13 , a_=32 , a_=3 , a_=4 , a_=[10, 20, 30, 40] , a_=[2, 2, 3, 2] , a_=True , a_=True , a_=37 , a_="gelu" , a_=10 , a_=0.02 , a_=["stage2", "stage3", "stage4"] , a_=[2, 3, 4] , a_=None , ):
'''simple docstring'''
__snake_case : List[str] = parent
__snake_case : str = batch_size
__snake_case : List[Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : str = num_stages
__snake_case : Any = hidden_sizes
__snake_case : Optional[int] = depths
__snake_case : Dict = is_training
__snake_case : Tuple = use_labels
__snake_case : str = intermediate_size
__snake_case : Optional[int] = hidden_act
__snake_case : Dict = num_labels
__snake_case : Tuple = initializer_range
__snake_case : Dict = out_features
__snake_case : Optional[int] = out_indices
__snake_case : str = scope
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case : str = None
if self.use_labels:
__snake_case : Any = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=a_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Optional[int] = ConvNextVaModel(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Any = model(a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Any = ConvNextVaForImageClassification(a_ )
model.to(a_ )
model.eval()
__snake_case : Union[str, Any] = model(a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Dict = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(a_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__snake_case : str = None
__snake_case : Optional[Any] = ConvNextVaBackbone(config=a_ )
model.to(a_ )
model.eval()
__snake_case : Tuple = model(a_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Union[str, Any] = config_and_inputs
__snake_case : Optional[int] = {'''pixel_values''': pixel_values, '''labels''': labels}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( __snake_case, __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = ConvNextVaModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : str = ConvNextVaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def lowercase ( ) ->Any:
"""simple docstring"""
__snake_case : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(a_ )
__snake_case : Dict = self.default_image_processor
__snake_case : Tuple = prepare_img()
__snake_case : Tuple = preprocessor(images=a_ , return_tensors='''pt''' ).to(a_ )
# forward pass
with torch.no_grad():
__snake_case : Union[str, Any] = model(**a_ )
# verify the logits
__snake_case : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , a_ )
__snake_case : Any = torch.tensor([0.9996, 0.1966, -0.4386] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a_ , atol=1E-4 ) )
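# A minimal inference sketch for the checkpoint exercised above (assumes
# network access to download `facebook/convnextv2-tiny-1k-224`):
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])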
| 102 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__ :
"""simple docstring"""
def __init__( self , __lowercase , __lowercase=1_3 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=1_6 , __lowercase=3_6 , __lowercase=6 , __lowercase=6 , __lowercase=6 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> List[Any]:
"""simple docstring"""
a__ : Any = parent
a__ : str = batch_size
a__ : List[str] = seq_length
a__ : str = is_training
a__ : Optional[int] = use_input_mask
a__ : Optional[Any] = use_token_type_ids
a__ : Optional[Any] = use_labels
a__ : str = vocab_size
a__ : int = embedding_size
a__ : Any = hidden_size
a__ : List[Any] = num_hidden_layers
a__ : Tuple = num_hidden_groups
a__ : List[str] = num_attention_heads
a__ : List[str] = intermediate_size
a__ : Dict = hidden_act
a__ : Optional[int] = hidden_dropout_prob
a__ : int = attention_probs_dropout_prob
a__ : Any = max_position_embeddings
a__ : List[str] = type_vocab_size
a__ : List[Any] = type_sequence_label_size
a__ : List[Any] = initializer_range
a__ : Tuple = num_labels
a__ : Tuple = num_choices
a__ : Dict = scope
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : List[str] = None
if self.use_input_mask:
a__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
a__ : List[Any] = None
if self.use_token_type_ids:
a__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a__ : List[Any] = None
a__ : Optional[Any] = None
a__ : Optional[Any] = None
if self.use_labels:
a__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : str = ids_tensor([self.batch_size] , self.num_choices )
a__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[Any] = AlbertModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
a__ : List[Any] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
a__ : Tuple = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
a__ : Union[str, Any] = AlbertForPreTraining(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : Optional[int] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , sentence_order_label=_lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[str]:
"""simple docstring"""
a__ : Union[str, Any] = AlbertForMaskedLM(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : Tuple = AlbertForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : Dict = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
a__ : Dict = self.num_labels
a__ : Optional[Any] = AlbertForSequenceClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> List[Any]:
"""simple docstring"""
a__ : List[str] = self.num_labels
a__ : Dict = AlbertForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
a__ : Optional[Any] = self.num_choices
a__ : Union[str, Any] = AlbertForMultipleChoice(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
a__ : List[Any] = model(
_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class snake_case__ (__UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :List[str] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase :Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase :Tuple = True
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase=False ) -> int:
"""simple docstring"""
a__ : Union[str, Any] = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase )
if return_labels:
if model_class in get_values(_lowerCAmelCase ):
a__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase )
a__ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase )
return inputs_dict
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Dict = AlbertModelTester(self )
a__ : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ : List[str] = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = AlbertModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : str = AlbertModel.from_pretrained("""albert-base-v2""" )
a__ : Optional[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
a__ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
a__ : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
a__ : Tuple = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , _lowerCAmelCase )
a__ : List[str] = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
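# The integration test above mirrors plain inference usage; a sketch (assumes
# network access for the `albert-base-v2` checkpoint):
import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids).last_hidden_state  # shape (1, 11, 768)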
| 370 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including ``num`` using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
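# Quick sanity checks for the sieve (a sketch, not part of the original file):
assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(2) == [2]
assert len(prime_sieve(100)) == 25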
| 266 | 0 |
"""
Generate all permutations of a sequence with Heap's algorithm (iterative form).
https://en.wikipedia.org/wiki/Heap%27s_algorithm
"""


def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` as a list of tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
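# Usage sketch: Heap's algorithm emits n! tuples, each differing from the
# previous one by a single swap.
assert sorted(heaps([1, 2, 3])) == sorted(
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
)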
| 79 |
"""
Calculate the n-th term of Sylvester's sequence: a(1) = 2, a(n) = a(n-1)^2 - a(n-1) + 1.
"""


def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
| 79 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for a given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row, one element at a time."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Build each row from the previous one, computing only its distinct half."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")

    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark both generators for several input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
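# Agreement check (sketch): both generators must produce identical rows.
assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4) == [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
]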
| 300 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
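# A concrete subclass wires itself into an argparse sub-parser roughly like
# this (a sketch; `EnvironmentCommand` here is a hypothetical stand-in, and
# `parser` is the sub-parsers action despite the ArgumentParser type hint):
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        env_parser = parser.add_parser("env")
        env_parser.set_defaults(func=lambda args: EnvironmentCommand())

    def run(self):
        print("collect environment info here")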
| 300 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
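# The block above is transformers' lazy-import pattern: `_import_structure`
# maps submodules to their public names, and the module object is replaced by
# a `_LazyModule` that only imports a submodule when one of its attributes is
# first accessed. A minimal, hypothetical re-implementation of the idea (not
# the real `_LazyModule`):
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, name):
        submodule = importlib.import_module("." + self._symbol_to_module[name], self.__name__)
        return getattr(submodule, name)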
| 41 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the maximum product a*b*c over Pythagorean triplets with a + b + c = n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(F'{solution() = }')
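# Worked example (sketch): for a perimeter of 12 the only Pythagorean triplet
# is (3, 4, 5), so the maximum product is 3 * 4 * 5 = 60.
assert solution(12) == 60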
| 41 | 1 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (list of float): Predicted labels, as returned by a model.\n references (list of float): Ground truth labels.\n return_pvalue (bool): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (float): Spearman correlation coefficient.\n p-value (float): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 303 |
"""simple docstring"""
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}"""
| 303 | 1 |
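# For example, the test above checks that a filename with blanks is
# percent-encoded into the URL path:
#   hf_hub_url("org-name/dataset-name", "filename with blanks.csv", revision="v2")
#   -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv"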
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
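# The helper under test essentially checks that every `.bin` weight file has a
# `.safetensors` counterpart, after normalizing the `pytorch_model` /
# `diffusion_pytorch_model` stems and an optional variant suffix. A
# stripped-down sketch of that idea -- not the real implementation:
def is_safetensors_compatible_sketch(filenames, variant=None):
    def stem(name, ext):
        base = name[: -len(ext)]
        if variant and base.endswith(f".{variant}"):
            base = base[: -len(variant) - 1]
        base = base.replace("diffusion_pytorch_model", "model")
        return base.replace("pytorch_model", "model")

    bins = {stem(f, ".bin") for f in filenames if f.endswith(".bin")}
    safes = {stem(f, ".safetensors") for f in filenames if f.endswith(".safetensors")}
    return bins <= safes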
| 254 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Dict ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any="attention" ):
"""simple docstring"""
__UpperCAmelCase : int = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
__UpperCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__UpperCAmelCase : Tuple = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__UpperCAmelCase : List[str] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
__UpperCAmelCase : List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__UpperCAmelCase : Optional[Any] = np.ascontiguousarray(params[f'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
__UpperCAmelCase : Dict = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any]=False ):
"""simple docstring"""
if split_mlp_wi:
__UpperCAmelCase : List[str] = params[f'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
__UpperCAmelCase : Dict = (wi_a, wi_a)
else:
__UpperCAmelCase : Union[str, Any] = params[f'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
__UpperCAmelCase : Tuple = params[f'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
return wi, wo
def lowercase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
return params[f'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = MTaConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
_UpperCamelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
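
# Example invocation (a sketch: the script name and all paths below are
# placeholders, not values from this repository):
#
#   python convert_umt5_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --scalable_attention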
| 254 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary tree node holding a value and optional left/right children."""

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    r"""
    Builds this tree:
            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Pre-order traversal: root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Post-order traversal: left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """In-order traversal: left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Height of the tree: number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal of the whole tree."""
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Returns the values at the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternates direction level by level."""
    if root is None:
        return []

    output: list[Any] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output
def main() -> None:  # Main function for testing.
    """Builds the sample tree and prints every traversal."""
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
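
# Expected output for the 5-node sample tree above (a quick sanity check, not
# part of the original module):
#   inorder(make_tree())  -> [4, 2, 5, 1, 3]
#   preorder(make_tree()) -> [1, 2, 4, 5, 3]
#   zigzag(make_tree())   -> [[1], [3, 2], [4, 5]]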
| 337 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args() -> argparse.Namespace:
    """Parses the command-line arguments for the complexity-prediction fine-tuning run."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    """Computes accuracy from the (logits, labels) pair returned by the Trainer."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
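
# For example (illustrative values only):
#   compute_metrics((np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])))
#   -> {"accuracy": 1.0}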
class CustomCallback(TrainerCallback):
    """Also evaluates on the training set at each evaluation, under the "train" metric prefix."""

    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main() -> None:
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
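
# Example invocation (a sketch: the script name is a placeholder and the flag
# values are illustrative):
#
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results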
| 337 | 1 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Reads a big-endian uint32 from the byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(_a , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( __lowerCamelCase : Optional[int] ):
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=_a ) as bytestream:
__UpperCAmelCase : Any = _readaa(_a )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__UpperCAmelCase : Any = _readaa(_a )
__UpperCAmelCase : Tuple = _readaa(_a )
__UpperCAmelCase : List[Any] = _readaa(_a )
__UpperCAmelCase : Union[str, Any] = bytestream.read(rows * cols * num_images )
__UpperCAmelCase : List[Any] = numpy.frombuffer(_a , dtype=numpy.uinta )
__UpperCAmelCase : int = data.reshape(_a , _a , _a , 1 )
return data
@deprecated(_a , """Please use tf.one_hot on tensors.""" )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Tuple ):
__UpperCAmelCase : List[Any] = labels_dense.shape[0]
__UpperCAmelCase : Optional[Any] = numpy.arange(_a ) * num_classes
__UpperCAmelCase : str = numpy.zeros((num_labels, num_classes) )
__UpperCAmelCase : Optional[Any] = 1
return labels_one_hot
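
# For example (illustrative values): _dense_to_one_hot(numpy.array([0, 2]), 3)
# returns
#   [[1., 0., 0.],
#    [0., 0., 1.]]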
@deprecated(_a , """Please use tf.data to implement this functionality.""" )
def lowerCamelCase__ ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=False , __lowerCamelCase : Tuple=10 ):
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=_a ) as bytestream:
__UpperCAmelCase : Optional[int] = _readaa(_a )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__UpperCAmelCase : Union[str, Any] = _readaa(_a )
__UpperCAmelCase : Tuple = bytestream.read(_a )
__UpperCAmelCase : Dict = numpy.frombuffer(_a , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_a , _a )
return labels
class _DataSet:
    """Container for a set of images and labels, with batching support."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Returns the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(_a , """Please write your own downloading logic.""" )
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Dict ):
if not gfile.Exists(_a ):
gfile.MakeDirs(_a )
__UpperCAmelCase : str = os.path.join(_a , _a )
if not gfile.Exists(_a ):
urllib.request.urlretrieve(_a , _a ) # noqa: S310
with gfile.GFile(_a ) as f:
__UpperCAmelCase : Optional[Any] = f.size()
print("""Successfully downloaded""" , _a , _a , """bytes.""" )
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
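
# Minimal usage sketch (the data directory is a placeholder):
#
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = datasets.train.next_batch(100)
#   print(images.shape, labels.shape)  # (100, 784) (100, 10)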
| 114 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
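
# Illustration of the attention-shape arithmetic asserted above: with the default
# tester (image_size=64, sr_ratios=[8, 4, 2, 1]), the first block attends from
# (64 // 4) ** 2 = 256 query positions to (64 // (4 * 8)) ** 2 = 4 key/value
# positions after SegFormer's spatial-reduction attention.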
| 131 | 0 |
"""simple docstring"""
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max]): range to sample the target shortest edge from.
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
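
# Minimal usage sketch (shapes are illustrative, not from the original module):
#
#   aug = ResizeShortestEdge([800, 800], max_size=1333)
#   img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
#   resized = aug([img])[0]  # shortest edge becomes 800, longest edge capped at 1333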
| 186 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_UpperCamelCase : int = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
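
# Minimal summarization sketch mirroring the integration test above (model names
# as used in the test; the input article is a placeholder):
#
#   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   batch = tokenizer(["Some long article ..."], return_tensors="np", truncation=True, padding=True)
#   summary_ids = model.generate(**batch, num_beams=2).sequences
#   print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))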
| 186 | 1 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components( self ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0 )
        prior_tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=embedder_projection_dim , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        prior = PriorTransformer(
            num_attention_heads=2 , attention_head_dim=12 , embedding_dim=embedder_projection_dim , num_layers=1 , )
        torch.manual_seed(0 )
        prior_scheduler = DDPMScheduler(
            variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=10_00 , clip_sample=True , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
        # regular denoising components
        torch.manual_seed(0 )
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size )
        image_noising_scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
        torch.manual_seed(0 )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        torch.manual_seed(0 )
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=embedder_hidden_size , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=embedder_hidden_size , layers_per_block=1 , upcast_attention=True , use_linear_projection=True , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=False , steps_offset=1 , )
        torch.manual_seed(0 )
        vae = AutoencoderKL()
        components = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
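    # The components above mirror the two-stage stable unCLIP design: a CLIP-text ->
    # image-embedding prior (prior_tokenizer/prior_text_encoder/prior/prior_scheduler),
    # a noising step applied to that image embedding (image_normalizer +
    # image_noising_scheduler), and a regular text-conditioned UNet/VAE denoiser that
    # also consumes the noised embedding through its 'projection' class embedding.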
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == 'cpu'
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference )

    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device in ['cpu', 'mps']
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference )
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests( unittest.TestCase ):
    """simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip( self ):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        output = pipe('anime turtle' , generator=generator , output_type='np' )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading( self ):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 78 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def partition( number_to_partition : int ):
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution( number_unique_partitions : int = 5_0_0_0 ):
'''simple docstring'''
    for number_to_partition in range(1 , NUM_PRIMES ):
        if len(partition(number_to_partition ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
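# Quick sanity check (a sketch, not part of the original solution): the prime
# summations of 10 are 2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7 and 5+5, so partition(10)
# yields the five distinct products {32, 36, 30, 21, 25}:
#     >>> sorted(partition(10))
#     [21, 25, 30, 32, 36]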
| 108 | 0 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 226 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ):
    '''simple docstring'''
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split("," )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split("," )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_path ):
    '''simple docstring'''
    r = OrderedDict()
    with open(ckp_path , "rb" ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    """simple docstring"""
    _pointer = {}
    def __init__( self , dictionary , name="root" , level=0 ):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v , dict ):
                v = Config(v , name=k , level=level + 1 )
            d[k] = v
            setattr(self , k , v )
        self._pointer = d
def __repr__( self : List[str]):
return str(list((self._pointer.keys())))
    def __setattr__( self , key , val ):
        self.__dict__[key] = val
        self.__dict__[key.split("." )[-1]] = val
        levels = key.split("." )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self , l ) and isinstance(getattr(self , l ) , Config ):
                    setattr(getattr(self , l ) , ".".join(levels[i:] ) , val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ):
        return self._pointer

    def dump_yaml( self , data , file_name ):
        with open(f'''{file_name}''' , "w" ) as stream:
            dump(data , stream )

    def dump_json( self , data , file_name ):
        with open(f'''{file_name}''' , "w" ) as stream:
            json.dump(data , stream )

    @staticmethod
    def load_yaml( config ):
        with open(config ) as stream:
            data = load(stream , Loader=Loader )
        return data
    def __str__( self ):
        t = "    "
        if self._name != "root":
            r = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v , Config ):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(v ).__name__})\n'''
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        return cls(config_dict )

    @classmethod
    def get_config_dict( cls , pretrained_model_name_or_path , **kwargs ):
        cache_dir = kwargs.pop("cache_dir" , None )
        force_download = kwargs.pop("force_download" , False )
        resume_download = kwargs.pop("resume_download" , False )
        proxies = kwargs.pop("proxies" , None )
        local_files_only = kwargs.pop("local_files_only" , False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path , CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path , filename=CONFIG_NAME , use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print("loading configuration file from path" )
        else:
            print("loading configuration file cache" )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare( in_tensor ):
    '''simple docstring'''
    out_tensor = torch.load("dump.pt" , map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape , n1[0, 0, :5] )
    print(n2.shape , n2[0, 0, :5] )
    assert np.allclose(n1 , n2 , rtol=0.01 , atol=0.1 ), (
        f'''{sum([1 for x in np.isclose(n1 , n2 , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good" )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    '''simple docstring'''
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
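# For example (illustrative ids): hf_bucket_url("bert-base-uncased", "config.yaml")
# resolves to "<endpoint>/bert-base-uncased-config.yaml" (legacy flat layout, used when
# the model id has no "/"), while hf_bucket_url("org/model", "config.yaml") resolves to
# "<endpoint>/org/model/config.yaml".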
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    '''simple docstring'''
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join("{}/{}".format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length" )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit="B" , unit_scale=True , total=total , initial=resume_size , desc="Downloading" , )
    for chunk in response.iter_content(chunk_size=1_024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get("ETag" )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + ".*" )
                if not file.endswith(".json" ) and not file.endswith(".lock" )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , "a+b" ) as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s" , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , "w" ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
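# Cache-layout summary (derived from the function above): every cache entry is the
# content file plus a "<name>.json" sidecar holding {"url", "etag"}; "<name>.lock"
# serializes concurrent downloads via FileLock, and "<name>.incomplete" stores the
# partial data that resumable downloads append to before the final os.replace.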
def url_to_filename( url , etag=None ):
    '''simple docstring'''
    url_bytes = url.encode("utf-8" )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8" )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5" ):
        filename += ".h5"
    return filename
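# Sketch of the resulting cache key: sha256(url).hexdigest(), optionally suffixed with
# "." + sha256(etag).hexdigest(), so the same URL/etag pair always maps to the same
# cache entry while a changed etag forces a fresh download.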
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    '''simple docstring'''
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace("." , "-" ) + "-extracted"
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , "r" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query , delim="," ):
    '''simple docstring'''
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split("\n" )
        req.close()
    return data
def get_image_from_url( url ):
    '''simple docstring'''
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
def load_frcnn_pkl_from_url( url ):
    '''simple docstring'''
    fn = url.split("/" )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , "rb" ) as stream:
        weights = pkl.load(stream )
    model = weights.pop("model" )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace("running_var" , "num_batches_tracked" )
            new[k2] = zero
    return new
def get_demo_path():
    '''simple docstring'''
    print(f'''{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb''' )
def img_tensorize( im , input_format="RGB" ):
    '''simple docstring'''
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, f'''could not connect to: {im}'''
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
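# e.g. list(chunk([1, 2, 3, 4, 5], batch=2)) -> [[1, 2], [3, 4], [5]]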
| 226 | 1 |
def kth_permutation( k , n ):
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
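# Minimal usage sketch (the function name above was restored by hand, so treat it as
# an assumption): the permutation with index 4 among the 3! lexicographic orderings
# of range(3) is [2, 0, 1].
#     >>> kth_permutation(4, 3)
#     [2, 0, 1]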
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    hub_checkpoint = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((768, 512) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 114 | 1 |
from ..utils import DummyObject, requires_backends
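# The classes below are "dummy objects": importable stand-ins used when the optional
# torch/transformers/onnx backends are missing, which raise a helpful ImportError via
# requires_backends as soon as they are constructed or loaded from a checkpoint.
# (The concrete class names restored below are an assumption, taken from diffusers'
# dummy_torch_and_transformers_and_onnx_objects module.)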
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class OnnxStableDiffusionPipeline(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )


class StableDiffusionOnnxPipeline(metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 208 |
def combination_util( arr , n , r , index , data , i ):
'''simple docstring'''
if index == r:
        for j in range(r ):
print(data[j] , end=' ' )
print(' ' )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
    data[index] = arr[i]
combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination( arr , n , r ):
'''simple docstring'''
    data = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCamelCase : int = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
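    # With arr = [10, 20, 30, 40, 50] and r = 3 this prints all C(5, 3) = 10
    # combinations, from "10 20 30" through "30 40 50".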
# This code is contributed by Ambuj sahu
| 208 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
lowerCAmelCase_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast( PreTrainedTokenizerFast ):
lowerCamelCase_ : List[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : Optional[Any] = RetriBertTokenizer
lowerCamelCase_ : Dict = ['''input_ids''', '''attention_mask''']
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ) -> Any:
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
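        # e.g. a pair (A, B) is laid out as [CLS] A [SEP] B [SEP], with token type id 0
        # covering "[CLS] A [SEP]" and 1 covering "B [SEP]", matching the BERT convention.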
    def save_vocabulary(self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 279 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
def lowerCamelCase (self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
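        # With this toy vocab, "lower" tokenizes to ["low", "er</w>"]: the merges
        # "l o" -> "lo", "lo w" -> "low" and "e r</w>" -> "er</w>" are the only ones learned.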
    def get_input_output_texts(self , tokenizer ) -> int:
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self ) -> Union[str, Any]:
        '''simple docstring'''
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders(self ) -> Optional[Any]:
        '''simple docstring'''
        tokenizer = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_2 = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2 )
| 279 | 1 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open( *args , **kwargs ):
            pass

def load_image( _ ):
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCAmelCase : Tuple = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline( self , model , tokenizer , processor ):
        dqa_pipeline = pipeline(
            """document-question-answering""" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , """""" ) ) )
        question = """What is the placebo?"""
        examples = [
            {
                """image""": load_image(image ),
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
            },
            {
                """image""": image,
                """question""": question,
                """word_boxes""": word_boxes,
            },
        ]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image contains no text that the OCR step can detect, so LayoutLMv2 has
        # nothing to ground an answer in and the pipeline should return an empty result.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # Optionally, the words and bounding boxes can be passed in directly.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: int = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
__snake_case: Tuple = INVOICE_URL
__snake_case: List[str] = """What is the invoice number?"""
__snake_case: int = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Optional[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase__ ( self : str ):
__snake_case: Dict = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
__snake_case: int = INVOICE_URL
__snake_case: int = """What is the invoice number?"""
__snake_case: Dict = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=A )
__snake_case: Optional[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=A , revision="""3dc6de3""" , )
__snake_case: List[str] = INVOICE_URL
__snake_case: Dict = """What is the invoice number?"""
__snake_case: Tuple = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__snake_case: Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
__snake_case: int = list(zip(*apply_tesseract(load_image(A ) , A , """""" ) ) )
# This model should also work if `image` is set to None
__snake_case: Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
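        # Passing precomputed `word_boxes` bypasses the OCR step entirely, which is
        # why "image" may be None here: the words and boxes are all the model needs.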
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Union[str, Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=A )
__snake_case: Dict = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=A , revision="""3dc6de3""" , max_seq_len=50 , )
__snake_case: Tuple = INVOICE_URL
__snake_case: Optional[Any] = """What is the invoice number?"""
__snake_case: Optional[int] = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
__snake_case: Optional[Any] = list(zip(*apply_tesseract(load_image(A ) , A , """""" ) ) )
# This model should also work if `image` is set to None
__snake_case: Optional[int] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
__snake_case: str = INVOICE_URL
__snake_case: Optional[Any] = """What is the invoice number?"""
__snake_case: Optional[int] = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(nested_simplify(A , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def UpperCAmelCase__ ( self : str ):
pass
| 357 |
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
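
# Migration sketch (our own example, not part of the original shim): import the
# pipeline from diffusers directly, e.g.
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")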
| 293 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
            ], device=torch_device, )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
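        # In other words, with TOLERANCE = 1e-3 every compared element must satisfy
        # 0.999 <= expected / observed <= 1.001 -- a relative-error check that stays
        # meaningful across the ~10e0..10e8 dynamic range of these activations.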
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
self.assertTrue(lower_bound and upper_bound )
| 111 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
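
# Processing one denomination at a time counts each *combination* exactly once,
# regardless of coin order; e.g. solution(5) == 4: {5}, {2,2,1}, {2,1,1,1}, {1,1,1,1,1}.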
if __name__ == "__main__":
assert solution(200) == 73_682
| 111 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False, adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
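
# Usage sketch (our own illustration, not part of the original module): the dynamic
# axes above tell the ONNX exporter which input dimensions may vary at runtime.
#
#   onnx_config = XmodOnnxConfig(XmodConfig())
#   onnx_config.inputs
#   # -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #                 ("attention_mask", {0: "batch", 1: "sequence"})])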
| 286 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"
    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 286 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the
    # sub-test name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
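
# e.g. for param.args == ("zero2", "base") a method named "test_fp32_non_distributed"
# is reported as "test_fp32_non_distributed_zero2_base", so both the ZeRO stage and
# the model variant show up in the test name.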
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=False,
        )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=False,
        )
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=False, fp16=True,
        )
    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(
            stage=stage, model=model, distributed=True, fp16=True,
        )
    def do_checks(self, output_dir):
        # run_asr.py doesn't save intermediate results, so for now we only verify
        # that the training subprocess completed without raising
        pass
    def run_and_check(
        self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, distributed=distributed, fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
        if fp16:
            args.extend(["--fp16"])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
        ds_args = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
        script = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
        launcher = self.get_launcher(distributed)
        cmd = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        # at most 2 GPUs: enough to exercise the distributed code path without
        # hogging CI machines
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 27 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
_A = Vector()
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = Vector([1, 2, 3, 4] )
self.assertEqual(len(_UpperCAmelCase ) , 4 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2] )
_A = Vector([1, 2, 3, 4, 5] )
_A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
_A = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
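        # Euclidean length is sqrt(sum(x_i ** 2)); e.g. sqrt(1 + 4) = 2.236... for (1, 2).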
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def lowerCAmelCase_ ( self : str ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def lowerCAmelCase_ ( self : int ):
_A = Vector([1, 2, 3] )
_A = Vector([2, -1, 4] ) # for test of dot product
_A = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def lowerCAmelCase_ ( self : Dict ):
self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 )
def lowerCAmelCase_ ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 2, 3] )
_A = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' )
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = Vector([1, 0, 0, 0, 0, 0] )
_A = x.copy()
self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) )
def lowerCAmelCase_ ( self : str ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
_A = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def lowerCAmelCase_ ( self : Any ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) )
def lowerCAmelCase_ ( self : List[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(a.component(2, 1), 7, delta=0.01)
def lowerCAmelCase_ ( self : Tuple ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
_A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def lowerCAmelCase_ ( self : int ):
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 315 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix @ x = vector by Gaussian elimination with
    partial pivoting followed by back substitution, returning x as a column
    matrix with entries rounded to 10 decimal places.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # build the augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: bring the row with the largest absolute entry in this column up
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_points: Matrix) -> Callable[[int], int]:
    """
    Fit a polynomial through the points (1, y_points[0]), (2, y_points[1]), ...
    by building the Vandermonde system and solving it, returning the polynomial
    as a callable.
    """
    size: int = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    col: int

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimum polynomials fitted to
    the first 1..order terms of the sequence generated by ``func``.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
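    # Illustrative extra check (our own example, not from the problem statement):
    # for u(n) = n**3 fitted on its first three terms, the first incorrect terms
    # of the optimum polynomials are 1, 15 and 58, so the FIT sum is 74.
    assert solution(lambda var: var**3, 3) == 74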
| 322 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxAlbertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 322 | 1 |
"""simple docstring"""
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = DownBlockaD # noqa F405
UpperCamelCase : str = 'down'
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[-0.0_2_3_2, -0.9_8_6_9, 0.8_0_5_4, -0.0_6_3_7, -0.1_6_8_8, -1.4_2_6_4, 0.4_4_7_0, -1.3_3_9_4, 0.0_9_0_4]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[int] = ResnetDownsampleBlockaD # noqa F405
UpperCamelCase : Any = 'down'
def lowerCamelCase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] =[0.0_7_1_0, 0.2_4_1_0, -0.7_3_2_0, -1.0_7_5_7, -1.1_3_4_3, 0.3_5_4_0, -0.0_1_3_3, -0.2_5_7_6, 0.0_9_4_8]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Dict = AttnDownBlockaD # noqa F405
UpperCamelCase : List[str] = 'down'
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] =[0.0_6_3_6, 0.8_9_6_4, -0.6_2_3_4, -1.0_1_3_1, 0.0_8_4_4, 0.4_9_3_5, 0.3_4_3_7, 0.0_9_1_1, -0.2_9_5_7]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = CrossAttnDownBlockaD # noqa F405
UpperCamelCase : Dict = 'down'
def lowerCamelCase__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] =super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: str =32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =[0.2_2_3_8, -0.7_3_9_6, -0.2_2_5_5, -0.3_8_2_9, 0.1_9_2_5, 1.1_6_6_5, 0.0_6_0_3, -0.7_2_9_5, 0.1_9_8_3]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Any = SimpleCrossAttnDownBlockaD # noqa F405
UpperCamelCase : Union[str, Any] = 'down'
@property
def lowerCamelCase__ ( self : Any ) -> int:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Tuple =32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def lowerCamelCase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =[0.7_9_2_1, -0.0_9_9_2, -0.1_9_6_2, -0.7_6_9_5, -0.4_2_4_2, 0.7_8_0_4, 0.4_7_3_7, 0.2_7_6_5, 0.3_3_3_8]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : int = SkipDownBlockaD # noqa F405
UpperCamelCase : List[Any] = 'down'
@property
def lowerCamelCase__ ( self : int ) -> Dict:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =[-0.0_8_4_5, -0.2_0_8_7, -0.2_4_6_5, 0.0_9_7_1, 0.1_9_0_0, -0.0_4_8_4, 0.2_6_6_4, 0.4_1_7_9, 0.5_0_6_9]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = AttnSkipDownBlockaD # noqa F405
UpperCamelCase : Optional[Any] = 'down'
@property
def lowerCamelCase__ ( self : Any ) -> List[Any]:
'''simple docstring'''
return super().get_dummy_input(include_skip_sample=lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[0.5_5_3_9, 0.1_6_0_9, 0.4_9_2_4, 0.0_5_3_7, -0.1_9_9_5, 0.4_0_5_0, 0.0_9_7_9, -0.2_7_2_1, -0.0_6_4_2]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : str = DownEncoderBlockaD # noqa F405
UpperCamelCase : Any = 'down'
@property
def lowerCamelCase__ ( self : List[str] ) -> int:
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCAmelCase )
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
"""in_channels""": 32,
"""out_channels""": 32,
}
SCREAMING_SNAKE_CASE_: Dict =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =[1.1_1_0_2, 0.5_3_0_2, 0.4_8_7_2, -0.0_0_2_3, -0.8_0_4_2, 0.0_4_8_3, -0.3_4_8_9, -0.5_6_3_2, 0.7_6_2_6]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : List[Any] = AttnDownEncoderBlockaD # noqa F405
UpperCamelCase : Optional[int] = 'down'
@property
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
return super().get_dummy_input(include_temb=lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[str] ={
"""in_channels""": 32,
"""out_channels""": 32,
}
SCREAMING_SNAKE_CASE_: int =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[0.8_9_6_6, -0.1_4_8_6, 0.8_5_6_8, 0.8_1_4_1, -0.9_0_4_6, -0.1_3_4_2, -0.0_9_7_2, -0.7_4_1_7, 0.1_5_3_8]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = UNetMidBlockaD # noqa F405
UpperCamelCase : Any = 'mid'
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str ={
"""in_channels""": 32,
"""temb_channels""": 128,
}
SCREAMING_SNAKE_CASE_: List[Any] =self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase__ ( self : Tuple ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =[-0.1_0_6_2, 1.7_2_4_8, 0.3_4_9_4, 1.4_5_6_9, -0.0_9_1_0, -1.2_4_2_1, -0.9_9_8_4, 0.6_7_3_6, 1.0_0_2_8]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = UNetMidBlockaDCrossAttn # noqa F405
UpperCamelCase : List[Any] = 'mid'
def lowerCamelCase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] =super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Dict =32
return init_dict, inputs_dict
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =[0.0_1_8_7, 2.4_2_2_0, 0.4_4_8_4, 1.1_2_0_3, -0.6_1_2_1, -1.5_1_2_2, -0.8_2_7_0, 0.7_8_5_1, 1.8_3_3_5]
super().test_output(lowerCAmelCase )
class a ( UpperCAmelCase__ , unittest.TestCase ):
UpperCamelCase : Union[str, Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
UpperCamelCase : Tuple = 'mid'
@property
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase )
def lowerCamelCase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        # NOTE: the dict key below is reconstructed; the garbled source kept only
        # the assigned value (32). Cross-attention block tests in this suite set
        # ``cross_attention_dim``.
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
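# Every block test above follows the same mixin-driven pattern: ``dummy_input``
# builds a small random input, ``prepare_init_args_and_inputs_for_common``
# supplies the block's constructor kwargs, and ``test_output`` runs one forward
# pass and compares a nine-value slice of the output against the hard-coded
# ``expected_slice`` values.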
| 173 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
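# np.maximum broadcasts the scalar 0 against the input, so the call below
# evaluates element-wise: relu([-1, 0, 5]) -> array([0, 0, 5]).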
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 173 | 1 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # the digit must not already appear in the same row or column ...
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # ... nor in the 3x3 box containing (row, column)
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo the choice and backtrack
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
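# The solver is plain depth-first backtracking: find the first empty cell, try
# each digit 1-9 that passes is_safe, recurse, and reset the cell to 0 when a
# choice leads to a dead end. Worst-case time is exponential in the number of
# empty cells, but is_safe prunes most branches early on typical puzzles.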
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 2_0)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 351 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowercase_ = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 194 | 0 |
'''simple docstring'''
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
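# Why both checks print True: by Fermat's little theorem, for a prime p and b
# not divisible by p, b ** (p - 1) % p == 1, so b ** (p - 2) % p is the modular
# inverse of b. Division a / b (mod p) is therefore a * b ** (p - 2) (mod p),
# which binary_exponentiation computes in O(log p) steps. (The float division
# on the left-hand side only matches because b divides a exactly here.)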
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
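# ``ConvNextModelTester`` follows the standard transformers test pattern: it owns
# the tiny model config and random inputs, while the unittest classes below call
# its ``create_and_check_*`` helpers so each model head is exercised the same way.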
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length, max_position_embeddings=None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length, max_new_tokens):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time, initial_timestamp=None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids, scores, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self):
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria, max_length):
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
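# Minimal usage sketch (the values here are hypothetical): criteria are combined
# in a StoppingCriteriaList and queried once per generated token.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)]
#   )
#   while not criteria(input_ids, scores):
#       ...  # sample the next token and append it to input_ids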
| 366 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name):
    # authorize twitter and initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
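# Note: user_timeline pages backwards through the timeline via max_id, and the
# loop ends when a request returns no tweets. Twitter's v1.1 API only exposes
# roughly the most recent 3200 tweets of an account, so older history cannot be
# retrieved this way.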
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('FirePing32')
| 243 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"artists_file": "artists.json",
"lyrics_file": "lyrics.json",
"genres_file": "genres.json",
}
UpperCamelCase_ = {
"artists_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
},
"genres_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
},
"lyrics_file": {
"jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
},
}
UpperCamelCase_ = {
"jukebox": 5_1_2,
}
class _a ( A__ ):
'''simple docstring'''
A : List[str] = VOCAB_FILES_NAMES
A : Tuple = PRETRAINED_VOCAB_FILES_MAP
A : Optional[Any] = PRETRAINED_LYRIC_TOKENS_SIZES
A : Any = ["input_ids", "attention_mask"]
    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCamelCase_ ( self ):
'''simple docstring'''
return dict(self.artists_encoder, self.genres_encoder, self.lyrics_encoder )
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.artists_encoder.get(__lowercase, 0 ) for artist in list_artists]
for genres in range(len(__lowercase ) ):
SCREAMING_SNAKE_CASE : List[str] = [self.genres_encoder.get(__lowercase, 0 ) for genre in list_genres[genres]]
SCREAMING_SNAKE_CASE : List[str] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
SCREAMING_SNAKE_CASE : Tuple = [[self.lyrics_encoder.get(__lowercase, 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return list(__lowercase )
def UpperCamelCase_ ( self, A, A, A, **A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_for_tokenization(__lowercase, __lowercase, __lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = self._tokenize(__lowercase )
return artist, genre, lyrics
def UpperCamelCase_ ( self, A, A, A, A = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
SCREAMING_SNAKE_CASE : Dict = artists[idx].lower()
SCREAMING_SNAKE_CASE : Optional[Any] = [genres[idx].lower()]
else:
SCREAMING_SNAKE_CASE : str = self._normalize(artists[idx] ) + """.v2"""
SCREAMING_SNAKE_CASE : str = [
self._normalize(__lowercase ) + """.v2""" for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
SCREAMING_SNAKE_CASE : Any = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
SCREAMING_SNAKE_CASE : str = {vocab[index]: index + 1 for index in range(len(__lowercase ) )}
SCREAMING_SNAKE_CASE : Optional[Any] = 0
SCREAMING_SNAKE_CASE : int = len(__lowercase ) + 1
SCREAMING_SNAKE_CASE : str = self.vocab
SCREAMING_SNAKE_CASE : Any = {v: k for k, v in self.vocab.items()}
SCREAMING_SNAKE_CASE : Union[str, Any] = """"""
else:
SCREAMING_SNAKE_CASE : Tuple = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE : str = self._run_strip_accents(__lowercase )
SCREAMING_SNAKE_CASE : Tuple = lyrics.replace('\\', '\n' )
SCREAMING_SNAKE_CASE : int = self.out_of_vocab.sub('', __lowercase ), [], []
return artists, genres, lyrics
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = unicodedata.normalize('NFD', __lowercase )
SCREAMING_SNAKE_CASE : int = []
for char in text:
SCREAMING_SNAKE_CASE : List[Any] = unicodedata.category(__lowercase )
if cat == "Mn":
continue
output.append(__lowercase )
return "".join(__lowercase )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = (
[chr(__lowercase ) for i in range(ord('a' ), ord('z' ) + 1 )]
+ [chr(__lowercase ) for i in range(ord('A' ), ord('Z' ) + 1 )]
+ [chr(__lowercase ) for i in range(ord('0' ), ord('9' ) + 1 )]
+ ["""."""]
)
SCREAMING_SNAKE_CASE : Any = frozenset(__lowercase )
SCREAMING_SNAKE_CASE : Dict = re.compile(r'_+' )
SCREAMING_SNAKE_CASE : Union[str, Any] = """""".join([c if c in accepted else '_' for c in text.lower()] )
SCREAMING_SNAKE_CASE : int = pattern.sub('_', __lowercase ).strip('_' )
return text
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
return " ".join(__lowercase )
def UpperCamelCase_ ( self, A, A = None, A = False ):
'''simple docstring'''
if not isinstance(__lowercase, __lowercase ):
SCREAMING_SNAKE_CASE : List[Any] = TensorType(__lowercase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
SCREAMING_SNAKE_CASE : List[str] = tf.constant
SCREAMING_SNAKE_CASE : Tuple = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
SCREAMING_SNAKE_CASE : Dict = torch.tensor
SCREAMING_SNAKE_CASE : Dict = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
SCREAMING_SNAKE_CASE : int = jnp.array
SCREAMING_SNAKE_CASE : List[Any] = _is_jax
else:
SCREAMING_SNAKE_CASE : Tuple = np.asarray
SCREAMING_SNAKE_CASE : Union[str, Any] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
SCREAMING_SNAKE_CASE : Tuple = [inputs]
if not is_tensor(__lowercase ):
SCREAMING_SNAKE_CASE : Dict = as_tensor(__lowercase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self, A, A, A="", A="pt" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [0, 0, 0]
SCREAMING_SNAKE_CASE : Any = [artist] * len(self.version )
SCREAMING_SNAKE_CASE : str = [genres] * len(self.version )
SCREAMING_SNAKE_CASE : List[str] = self.tokenize(__lowercase, __lowercase, __lowercase )
SCREAMING_SNAKE_CASE : Dict = self._convert_token_to_id(__lowercase, __lowercase, __lowercase )
SCREAMING_SNAKE_CASE : List[Any] = [-INFINITY] * len(full_tokens[-1] )
SCREAMING_SNAKE_CASE : Dict = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=__lowercase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def UpperCamelCase_ ( self, A, A = None ):
'''simple docstring'''
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
__lowercase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(__lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder, ensure_ascii=__lowercase ) )
SCREAMING_SNAKE_CASE : Tuple = os.path.join(
__lowercase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(__lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder, ensure_ascii=__lowercase ) )
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(
__lowercase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(__lowercase, 'w', encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder, ensure_ascii=__lowercase ) )
return (artists_file, genres_file, lyrics_file)
def UpperCamelCase_ ( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.artists_decoder.get(__lowercase )
SCREAMING_SNAKE_CASE : List[str] = [self.genres_decoder.get(__lowercase ) for genre in genres_index]
SCREAMING_SNAKE_CASE : Any = [self.lyrics_decoder.get(__lowercase ) for character in lyric_index]
return artist, genres, lyrics
| 251 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
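# Each pickled DataFrame file becomes one Arrow table: _generate_tables reads it
# with pd.read_pickle, Table.from_pandas converts it, and _cast_table applies the
# user-declared features schema when one is set on the builder config.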
| 170 | 0 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
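# Comment-only edits do not change a module's hash, since ``#...`` tails are
# stripped before hashing. Whitespace left behind on a stripped line still
# counts, though, because the surviving lines are joined verbatim.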
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip') | 303 |
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=24 , __UpperCAmelCase=2 , __UpperCAmelCase=6 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=5_12 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=None , __UpperCAmelCase=10_00 , ) ->List[str]:
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_input_mask
a_ = use_token_type_ids
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_act
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = type_vocab_size
a_ = type_sequence_label_size
a_ = initializer_range
a_ = num_labels
a_ = scope
a_ = range_bbox
def UpperCAmelCase__ ( self) ->int:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox)
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
a_ = bbox[i, j, 3]
a_ = bbox[i, j, 1]
a_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
a_ = bbox[i, j, 2]
a_ = bbox[i, j, 0]
a_ = t
a_ = None
if self.use_input_mask:
a_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
a_ = None
if self.use_token_type_ids:
a_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
a_ = None
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self) ->List[str]:
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Union[str, Any]:
a_ = self.num_labels
a_ = LiltForTokenClassification(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Dict:
a_ = LiltForQuestionAnswering(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(
__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a_ : List[Any] = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ : List[str] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ : Any = False
a_ : Dict = False
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->int:
return True
def UpperCAmelCase__ ( self) ->str:
a_ = LiltModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)
def UpperCAmelCase__ ( self) ->List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Dict:
a_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a_ = type
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->List[str]:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = LiltModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@slow
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(__UpperCAmelCase)
a_ = torch.tensor([[1, 2]] , device=__UpperCAmelCase)
a_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(input_ids=__UpperCAmelCase , bbox=__UpperCAmelCase)
a_ = torch.Size([1, 2, 7_68])
a_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=__UpperCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , __UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __UpperCAmelCase , atol=1E-3)) | 303 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,A : Tuple ,A : Union[str, Any]=13 ,A : List[Any]=7 ,A : List[Any]=True ,A : List[str]=True ,A : List[str]=True ,A : Tuple=True ,A : Optional[Any]=99 ,A : Union[str, Any]=64 ,A : str=32 ,A : int=5 ,A : Union[str, Any]=4 ,A : Union[str, Any]=37 ,A : List[Any]="gelu" ,A : Any=0.1 ,A : Any=0.1 ,A : Any=5_12 ,A : Optional[Any]=16 ,A : int=2 ,A : Union[str, Any]=0.02 ,A : Any=3 ,A : Union[str, Any]=4 ,A : Union[str, Any]=None ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = embedding_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
def UpperCamelCase_ ( self : Optional[int] ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__A = ids_tensor([self.batch_size] ,self.num_choices )
__A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : int ):
return MobileBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : Dict ,A : int ,A : Tuple ,A : Optional[int] ,A : List[Any] ,A : Optional[int] ,A : Optional[int] ,A : Optional[Any] ):
__A = MobileBertModel(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A )
__A = model(A ,token_type_ids=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def UpperCamelCase_ ( self : Tuple ,A : Tuple ,A : Union[str, Any] ,A : Tuple ,A : Union[str, Any] ,A : Union[str, Any] ,A : int ,A : Optional[int] ):
__A = MobileBertForMaskedLM(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Optional[int] ,A : str ,A : Any ,A : Optional[int] ,A : Optional[Any] ,A : List[Any] ,A : Tuple ,A : Dict ):
__A = MobileBertForNextSentencePrediction(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : str ,A : Optional[Any] ,A : Optional[int] ,A : Optional[Any] ,A : Tuple ,A : List[str] ,A : Optional[int] ,A : Dict ):
__A = MobileBertForPreTraining(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,next_sentence_label=A ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def UpperCamelCase_ ( self : Optional[Any] ,A : Any ,A : List[str] ,A : List[str] ,A : Optional[int] ,A : Dict ,A : Optional[int] ,A : Any ):
__A = MobileBertForQuestionAnswering(config=A )
model.to(A )
model.eval()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,start_positions=A ,end_positions=A ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCamelCase_ ( self : str ,A : int ,A : Tuple ,A : Optional[Any] ,A : int ,A : Union[str, Any] ,A : List[Any] ,A : Dict ):
__A = self.num_labels
__A = MobileBertForSequenceClassification(A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : Optional[int] ,A : Optional[Any] ,A : List[str] ,A : List[str] ,A : Optional[int] ,A : int ,A : Any ,A : Any ):
__A = self.num_labels
__A = MobileBertForTokenClassification(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : int ,A : Dict ,A : List[str] ,A : List[str] ,A : List[Any] ,A : Optional[Any] ,A : str ,A : Any ):
__A = self.num_choices
__A = MobileBertForMultipleChoice(config=A )
model.to(A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__A = model(
A ,attention_mask=A ,token_type_ids=A ,labels=A ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def UpperCamelCase_ ( self : int ,A : Dict ,A : Optional[int] ,A : int=False ):
__A = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class in get_values(A ):
__A = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=A )
__A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def UpperCamelCase_ ( self : List[str] ):
__A = MobileBertModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*A )
def UpperCamelCase_ ( self : int ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*A )
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*A )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
SCREAMING_SNAKE_CASE :Union[str, Any] = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
__A = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(A )
__A = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__A = model(A )[0]
__A = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape ,A )
__A = torch.tensor(
[
[
[-2.4736526E07, 8.2691656E04, 1.6521838E05],
[-5.7541704E-01, 3.9056022E00, 4.4011507E00],
[2.6047359E00, 1.5677652E00, -1.7324188E-01],
]
] ,device=A ,)
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__A = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__A = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 15 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline( self , model , tokenizer , processor ):
        """simple docstring"""
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=['politics', 'health'] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test( self , classifier , _ ):
        """simple docstring"""
        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )

        # No kwarg
        outputs = classifier('Who are you voting for in 2020?' , ['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )

        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )

        outputs = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )

        outputs = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
        self.assertEqual(
            outputs , {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )

        outputs = classifier(
            'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
        self.assertEqual(outputs , {'sequence': ANY(str ), 'labels': [ANY(str )], 'scores': [ANY(float )]} )

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(['I am happy'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'labels': [ANY(str ), ANY(str )], 'scores': [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )

        with self.assertRaises(ValueError ):
            classifier('' , candidate_labels='politics' )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels='politics' )
        with self.assertRaises(ValueError ):
            classifier('Who are you voting for in 2020?' , candidate_labels='' )
        with self.assertRaises(TypeError ):
            classifier('Who are you voting for in 2020?' , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
        with self.assertRaises(AttributeError ):
            classifier(
                'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=None , )

        self.run_entailment_id(classifier )
    def run_entailment_id( self , zero_shot_classifier ):
        """simple docstring"""
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 0, 'NON-ENTAIL': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )

        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
    def test_small_model_pt( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.3_33, 0.3_33, 0.3_33],
            } , )
@require_tf
    def test_small_model_tf( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline(
            'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['science', 'public health', 'politics'],
                'scores': [0.3_33, 0.3_33, 0.3_33],
            } , )
@slow
@require_torch
    def test_large_model_pt( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.9_76, 0.0_15, 0.0_09],
            } , )
        outputs = zero_shot_classifier(
            'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
            ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
            ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
            ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
            ' machine translation tasks show these models to be superior in quality while being more parallelizable'
            ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
            ' English-to-German translation task, improving over the existing best results, including ensembles by'
            ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
            ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
            ' fraction of the training costs of the best models from the literature. We show that the Transformer'
            ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': (
                    'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                    ' networks in an encoder-decoder configuration. The best performing models also connect the'
                    ' encoder and decoder through an attention mechanism. We propose a new simple network'
                    ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                    ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                    ' superior in quality while being more parallelizable and requiring significantly less time to'
                    ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                    ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                    ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                    ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                    ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                    ' other tasks by applying it successfully to English constituency parsing both with large and'
                    ' limited training data.'
                ),
                'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
            } , )
@slow
@require_tf
    def test_large_model_tf( self ):
        """simple docstring"""
        zero_shot_classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
        outputs = zero_shot_classifier(
            'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': 'Who are you voting for in 2020?',
                'labels': ['politics', 'public health', 'science'],
                'scores': [0.9_76, 0.0_15, 0.0_09],
            } , )
        outputs = zero_shot_classifier(
            'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
            ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
            ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
            ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
            ' machine translation tasks show these models to be superior in quality while being more parallelizable'
            ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
            ' English-to-German translation task, improving over the existing best results, including ensembles by'
            ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
            ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
            ' fraction of the training costs of the best models from the literature. We show that the Transformer'
            ' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
            ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=True , )
        self.assertEqual(
            nested_simplify(outputs ) , {
                'sequence': (
                    'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
                    ' networks in an encoder-decoder configuration. The best performing models also connect the'
                    ' encoder and decoder through an attention mechanism. We propose a new simple network'
                    ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
                    ' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
                    ' superior in quality while being more parallelizable and requiring significantly less time to'
                    ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
                    ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
                    ' English-to-French translation task, our model establishes a new single-model state-of-the-art'
                    ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
                    ' costs of the best models from the literature. We show that the Transformer generalizes well to'
                    ' other tasks by applying it successfully to English constituency parsing both with large and'
                    ' limited training data.'
                ),
                'labels': ['translation', 'machine learning', 'vision', 'statistics'],
                'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
            } , )
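
# --- illustrative usage sketch (not part of the test suite; assumes a transformers install and
# network access to download the 'roberta-large-mnli' checkpoint used in the slow tests above) ---
def _zero_shot_demo():
    classifier = pipeline('zero-shot-classification' , model='roberta-large-mnli' )
    return classifier(
        'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )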
| 39 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a_ ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'callback',
        'latents',
        'callback_steps',
        'output_type',
        'num_images_per_prompt',
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=1_6000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()

        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components

    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs

    def test_dance_diffusion( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components['''unet'''].sample_size)
        expected_slice = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    @skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion( self ):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2

    def test_dance_diffusion_fp16( self ):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
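
# --- illustrative usage sketch (not part of the tests; assumes a diffusers install, a GPU, and
# network access to the 'harmonai/maestro-150k' checkpoint referenced above) ---
def _dance_diffusion_demo():
    pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' ).to('''cuda''' )
    generator = torch.manual_seed(0 )
    # audios has shape (batch, channels, samples); 4.096 s of audio at the model's sample rate
    return pipe(generator=generator , num_inference_steps=100 , audio_length_in_s=4.096 ).audios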
| 19 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All prime numbers greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution( n = 2_000_000 ):
    """simple docstring"""
    return sum(takewhile(lambda x : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 19 | 1 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)
__A = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt",
"facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/esm2_t6_8M_UR50D": 1_024,
"facebook/esm2_t12_35M_UR50D": 1_024,
}
def load_vocab_file( vocab_file ):
    with open(vocab_file , "r" ) as f:
        lines = f.read().splitlines()
        return [l.strip() for l in lines]
class EsmTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__( self , vocab_file , unk_token="<unk>" , cls_token="<cls>" , pad_token="<pad>" , mask_token="<mask>" , eos_token="<eos>" , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.all_tokens = load_vocab_file(vocab_file )
        self._id_to_token = dict(enumerate(self.all_tokens ) )
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens )}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens )

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def _tokenize( self , text , **kwargs ):
        """simple docstring"""
        return text.split()

    def get_vocab_size( self , with_added_tokens=False ):
        """simple docstring"""
        return len(self._id_to_token )

    def get_vocab( self ):
        """simple docstring"""
        return {token: i for i, token in enumerate(self.all_tokens )}

    def token_to_id( self , token ):
        """simple docstring"""
        return self._token_to_id.get(token , self._token_to_id.get(self.unk_token ) )

    def id_to_token( self , index ):
        """simple docstring"""
        return self._id_to_token.get(index , self.unk_token )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!" )
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )

            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1 ) + [1]
        return mask

    def save_vocabulary( self , save_directory , filename_prefix ):
        """simple docstring"""
        vocab_file = os.path.join(save_directory , (filename_prefix + "-" if filename_prefix else "") + "vocab.txt" )
        with open(vocab_file , "w" ) as f:
            f.write("\n".join(self.all_tokens ) )
        return (vocab_file,)

    @property
    def vocab_size( self ):
        """simple docstring"""
        return self.get_vocab_size(with_added_tokens=False )

    def _add_tokens( self , new_tokens , special_tokens = False ):
        """simple docstring"""
        return super()._add_tokens(new_tokens , special_tokens=True )
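
# --- illustrative usage sketch (not part of the module; the vocab path and tokens are made up) ---
# with open("toy_vocab.txt", "w") as f:
#     f.write("\n".join(["<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "<mask>"]))
# tokenizer = EsmTokenizer(vocab_file="toy_vocab.txt")
# tokenizer.tokenize("L A G")         # -> ["L", "A", "G"] (plain whitespace split, see _tokenize)
# tokenizer("L A G")["input_ids"]     # -> [0, 4, 5, 6, 2], i.e. <cls> L A G <eos>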
| 148 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer( Seq2SeqTrainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate( self , eval_dataset = None , eval_examples=None , ignore_keys = None , metric_key_prefix = "eval" , **gen_kwargs , ):
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length" ) is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams" ) is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output )
            metrics = self.compute_metrics(eval_preds )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F'''{metric_key_prefix}_''' ):
                    metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
            metrics.update(output.metrics )
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics

    def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix = "test" , **gen_kwargs ):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset )

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , metric_key_prefix=metric_key_prefix , )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
            start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
        output.metrics.update(
            speed_metrics(
                metric_key_prefix , start_time , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples , predict_dataset , output , "predict" )
        metrics = self.compute_metrics(predictions )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F'''{metric_key_prefix}_''' ):
                metrics[F'''{metric_key_prefix}_{key}'''] = metrics.pop(key )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
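
# --- illustrative sketch (generic placeholder names, not part of the trainer above) of the
# try/finally pattern used in evaluate()/predict(): metric computation is disabled so the inner
# loop only gathers predictions, and the original callable is restored even if the loop raises.
def _swap_compute_metrics_sketch( trainer , run_loop ):
    compute_metrics = trainer.compute_metrics
    trainer.compute_metrics = None
    try:
        output = run_loop()
    finally:
        trainer.compute_metrics = compute_metrics
    return output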
| 148 | 1 |
def sum_of_series( first_term , common_diff , num_of_terms ):
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for the sum of an arithmetic series
    return total


def main():
    """simple docstring"""
    print(sum_of_series(1 , 1 , 10 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
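
# Worked example (not part of the original module): for first_term=1, common_diff=1 and
# num_of_terms=10 the formula gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0, i.e. 1 + 2 + ... + 10.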
| 360 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc( doc_list ):
    """simple docstring"""
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} )
        else:
            new_doc_list.append(doc )

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                f'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )

    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError('''The doc list has two \'overview\' docs, which is not allowed.''' )

    overview_doc.extend(new_doc )

    # Sort
    return overview_doc
def check_scheduler_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]['''sections''']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]['''sections''']
    new_scheduler_doc = clean_doc_toc(scheduler_doc )

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['''sections'''] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


def check_pipeline_doc( overwrite=False ):
    """simple docstring"""
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]['''sections''']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['''sections''']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['''section''']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc['''section'''] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['''sections'''] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
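
# --- illustrative sketch (hypothetical data, not part of the original script) of what
# clean_doc_toc is meant to do ---
# input:  [{"local": "pipe_b", "title": "B"}, {"local": "overview", "title": "Overview"},
#          {"local": "pipe_a", "title": "A"}]
# output: [{"local": "overview", "title": "Overview"},   # "overview" is always moved first
#          {"local": "pipe_a", "title": "A"}, {"local": "pipe_b", "title": "B"}]  # rest sorted by title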
| 93 | 0 |
import math
class SelfOrganizingMap:
    def get_winner( self , weights , sample ) -> int:
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1

    def update( self , weights , sample , j , alpha ) -> list[list[int | float]]:
        for i in range(len(sample ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )

            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )

    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
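
# Worked update step (not part of the original module): with alpha = 0.5, weight 0.2 and
# sample value 1, the rule weights[j][i] += alpha * (sample[i] - weights[j][i]) moves the
# weight to 0.2 + 0.5 * (1 - 0.2) = 0.6, i.e. halfway toward the winning sample component.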
| 19 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = ImageGPTImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'clusters' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )

    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )

    def test_image_processor_to_json_string( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )

    def test_image_processor_to_json_file( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , 'image_processor.json' )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )

    def test_image_processor_from_and_save_pretrained( self ):
        image_processor_first = self.image_processing_class(**self.image_processor_dict )

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )

    @unittest.skip('ImageGPT requires clusters at initialization' )
    def test_init_without_params( self ):
        pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )

    image1 = Image.open(dataset[4]['file'] )
    image2 = Image.open(dataset[5]['file'] )

    images = [image1, image2]

    return images
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_image( self ):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0] , return_tensors='pt' )

        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_024) )

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )

        # test batched
        encoding = image_processing(images , return_tensors='pt' )

        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_024) )

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
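
# --- illustrative sketch (not part of the tests) of the color-cluster quantization behind
# ImageGPT inputs: each pixel is mapped to the index of its nearest cluster center, turning an
# image into a sequence of cluster ids. Shapes below are made up for illustration.
def _nearest_cluster_sketch( pixels , clusters ):
    # pixels: (N, 3) float array, clusters: (K, 3) float array of cluster centers
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1 )
    return distances.argmin(-1 )  # (N,) array of cluster indices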
| 113 | 0 |
"""simple docstring"""
def reverse_words(input_str: str ) -> str:
    return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
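
# Example (not part of the original module): reverse_words("I love Python") -> "Python love I".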
| 357 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : str = tau * frequency / samplerate
_UpperCAmelCase : int = sin(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Any = _sin / (2 * q_factor)
_UpperCAmelCase : Any = (1 - _cos) / 2
_UpperCAmelCase : Tuple = 1 - _cos
_UpperCAmelCase : List[str] = 1 + alpha
_UpperCAmelCase : Union[str, Any] = -2 * _cos
_UpperCAmelCase : Optional[Any] = 1 - alpha
_UpperCAmelCase : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : List[str] = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : Dict = cos(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = _sin / (2 * q_factor)
_UpperCAmelCase : Dict = (1 + _cos) / 2
_UpperCAmelCase : Dict = -1 - _cos
_UpperCAmelCase : Optional[Any] = 1 + alpha
_UpperCAmelCase : str = -2 * _cos
_UpperCAmelCase : Union[str, Any] = 1 - alpha
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : List[Any] = tau * frequency / samplerate
_UpperCAmelCase : Optional[int] = sin(UpperCamelCase__ )
_UpperCAmelCase : Dict = cos(UpperCamelCase__ )
_UpperCAmelCase : str = _sin / (2 * q_factor)
_UpperCAmelCase : Tuple = _sin / 2
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Dict = -ba
_UpperCAmelCase : str = 1 + alpha
_UpperCAmelCase : List[str] = -2 * _cos
_UpperCAmelCase : str = 1 - alpha
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : Tuple = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
_UpperCAmelCase : Optional[Any] = 1 - alpha
_UpperCAmelCase : Optional[int] = -2 * _cos
_UpperCAmelCase : str = 1 + alpha
_UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : List[str] = tau * frequency / samplerate
_UpperCAmelCase : Union[str, Any] = sin(UpperCamelCase__ )
_UpperCAmelCase : int = cos(UpperCamelCase__ )
_UpperCAmelCase : Dict = _sin / (2 * q_factor)
_UpperCAmelCase : int = 10 ** (gain_db / 40)
_UpperCAmelCase : Union[str, Any] = 1 + alpha * big_a
_UpperCAmelCase : int = -2 * _cos
_UpperCAmelCase : Any = 1 - alpha * big_a
_UpperCAmelCase : Dict = 1 + alpha / big_a
_UpperCAmelCase : str = -2 * _cos
_UpperCAmelCase : Union[str, Any] = 1 - alpha / big_a
_UpperCAmelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : str = tau * frequency / samplerate
_UpperCAmelCase : List[Any] = sin(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Dict = _sin / (2 * q_factor)
_UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
_UpperCAmelCase : int = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase : List[str] = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase : List[Any] = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase : Optional[int] = 2 * sqrt(UpperCamelCase__ ) * alpha
_UpperCAmelCase : Optional[Any] = big_a * (pmc + aaa)
_UpperCAmelCase : List[Any] = 2 * big_a * mpc
_UpperCAmelCase : Any = big_a * (pmc - aaa)
_UpperCAmelCase : Union[str, Any] = ppmc + aaa
_UpperCAmelCase : Dict = -2 * pmpc
_UpperCAmelCase : str = ppmc - aaa
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : Tuple = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : str = cos(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
_UpperCAmelCase : str = 10 ** (gain_db / 40)
_UpperCAmelCase : Any = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase : Union[str, Any] = 2 * sqrt(UpperCamelCase__ ) * alpha
_UpperCAmelCase : str = big_a * (ppmc + aaa)
_UpperCAmelCase : List[str] = -2 * big_a * pmpc
_UpperCAmelCase : Any = big_a * (ppmc - aaa)
_UpperCAmelCase : str = pmc + aaa
_UpperCAmelCase : Any = 2 * mpc
_UpperCAmelCase : Tuple = pmc - aaa
_UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
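
# --- illustrative usage sketch (not part of the module above) ---
# Each factory above returns a configured second-order IIRFilter; applying it is sample-by-sample.
# This assumes IIRFilter exposes process(sample: float) -> float, as in the companion
# audio_filters.iir_filter module; `samples` is a made-up input list of floats.
# lowpass = <the first factory above>(frequency=1_000, samplerate=48_000)
# filtered = [lowpass.process(sample) for sample in samples]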
| 68 | 0 |
import re
def is_sri_lankan_phone_number( phone: str ) -> bool:
    """simple docstring"""
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )

    return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
    phone = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
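
# Examples against the pattern above (not part of the original module):
# is_sri_lankan_phone_number("+94773283048") -> True
# is_sri_lankan_phone_number("0718382399")   -> True
# is_sri_lankan_phone_number("0912343221")   -> False  (the block after the prefix must start with 7)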
| 279 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_( state_dict ):
    """simple docstring"""
    ignore_keys = ['''layers''', '''blocks''']
    for k in ignore_keys:
        state_dict.pop(k , None )
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys( s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )

        print(f'''{key} -> {new_key}''' )

        s_dict[new_key] = s_dict.pop(key )
    return s_dict
def make_linear_from_emb( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download( url , root="." ) -> bytes:  # root defaults to the CWD so the one-argument call below keeps working
    """simple docstring"""
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split('''/''' )[-2]
    download_target = os.path.join(root , filename )

    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(f'''{download_target} exists and is not a regular file''' )

    if os.path.isfile(download_target ):
        model_bytes = open(download_target , '''rb''' ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' )

    with urllib.request.urlopen(url ) as source, open(download_target , '''wb''' ) as output:
        with tqdm(
            total=int(source.info().get('''Content-Length''' ) ) , ncols=80 , unit='''iB''' , unit_scale=True , unit_divisor=1_024 ) as loop:
            while True:
                buffer = source.read(8_192 )
                if not buffer:
                    break

                output.write(buffer )
                loop.update(len(buffer ) )

    model_bytes = open(download_target , '''rb''' ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            '''Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.''' )

    return model_bytes
def convert_openai_whisper_to_tfms( checkpoint_path , pytorch_dump_folder_path ):
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    dimensions = original_checkpoint['''dims''']
    state_dict = original_checkpoint['''model_state_dict''']
    proj_out_weights = state_dict['''decoder.token_embedding.weight''']
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict['''decoder.layers.0.fc1.weight'''].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_head'''] , max_source_positions=dimensions['''n_audio_ctx'''] , )

    model = WhisperForConditionalGeneration(config )
    missing , unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f''' but all the following weights are missing {missing}''' )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
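
# --- illustrative invocation (the script name and paths below are placeholders) ---
# python convert_whisper.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny
# A bare model size such as "tiny" is resolved through the _MODELS URL table above and
# downloaded with SHA256 verification; a local *.pt path is loaded directly instead.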
| 279 | 1 |
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class A_ ( lowerCAmelCase_ , lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """pixel_values"""
_lowerCamelCase : Tuple = False
_lowerCamelCase : int = TimmBackboneConfig
def __init__( self : Optional[Any] , snake_case_ : Dict , **snake_case_ : Tuple ):
requires_backends(self , "timm" )
super().__init__(snake_case_ )
_UpperCAmelCase = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(snake_case_ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase = getattr(snake_case_ , "use_pretrained_backbone" , snake_case_ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase = config.out_indices if getattr(snake_case_ , "out_indices" , snake_case_ ) is not None else (-1,)
_UpperCAmelCase = timm.create_model(
config.backbone , pretrained=snake_case_ , features_only=config.features_only , in_chans=config.num_channels , out_indices=snake_case_ , **snake_case_ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase = self._backbone.return_layers
_UpperCAmelCase = {layer["module"]: str(snake_case_ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(snake_case_ )
@classmethod
def lowercase ( cls : List[str] , snake_case_ : Any , *snake_case_ : Tuple , **snake_case_ : str ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase = kwargs.pop("use_timm_backbone" , snake_case_ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase = TimmBackboneConfig(
backbone=snake_case_ , num_channels=snake_case_ , features_only=snake_case_ , use_pretrained_backbone=snake_case_ , out_indices=snake_case_ , )
return super()._from_config(snake_case_ , **snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : str ):
pass
def lowercase ( self : Tuple , snake_case_ : int , snake_case_ : str=None , snake_case_ : str=None , snake_case_ : Union[str, Any]=None , **snake_case_ : List[Any] ):
_UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase = self._all_layers
_UpperCAmelCase = self._backbone(snake_case_ , **snake_case_ )
_UpperCAmelCase = self._return_layers
_UpperCAmelCase = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase = self._backbone(snake_case_ , **snake_case_ )
_UpperCAmelCase = None
_UpperCAmelCase = tuple(snake_case_ )
_UpperCAmelCase = tuple(snake_case_ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=snake_case_ , hidden_states=snake_case_ , attentions=snake_case_ )
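# Hypothetical usage sketch (assumes torch and timm are installed; the model
# name "resnet18" is illustrative only):
#
# from transformers import TimmBackboneConfig, TimmBackbone
#
# config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
# model = TimmBackbone(config)
# outputs = model(pixel_values)         # pixel_values: (B, 3, H, W) float tensor
# feature_maps = outputs.feature_maps   # one tensor per requested stage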
| 156 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE :Dict = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :Optional[int] = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__SCREAMING_SNAKE_CASE :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
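# Sketch of what the lazy module above buys: importing the config stays cheap,
# while touching a model class triggers the deferred torch-backed import.
#
# from transformers.models.upernet import UperNetConfig                   # no torch needed
# from transformers.models.upernet import UperNetForSemanticSegmentation  # imports modeling_upernet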
| 156 | 1 |
'''simple docstring'''
a__ : Union[str, Any] = 9.8_06_65
def _lowercase ( __A ,__A ,__A = g ):
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
    if volume < 0:
        raise ValueError("""Impossible object volume""" )
    if gravity <= 0:
        raise ValueError("""Impossible gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
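# Worked example of Archimedes' principle, F_b = rho * g * V (standalone sketch):
# 1000 kg/m^3 (water) * 9.80665 m/s^2 * 0.5 m^3 displaced volume == 4903.325 N.
assert abs(1000 * 9.80665 * 0.5 - 4903.325) < 1e-9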
| 349 |
'''simple docstring'''
from datetime import datetime
import requests
def _lowercase ( __A ):
'''simple docstring'''
__UpperCamelCase = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
__UpperCamelCase = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(__A ).content
if __name__ == "__main__":
a__ : int = input('Enter Video/IGTV url: ').strip()
a__ : int = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = CodeGenTokenizer
snake_case__ = CodeGenTokenizerFast
snake_case__ = True
snake_case__ = {"add_prefix_space": True}
snake_case__ = False
def lowerCamelCase__ ( self : int ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCamelCase : Tuple = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
__lowerCamelCase : Tuple = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
__lowerCamelCase : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCamelCase : str = {"unk_token": "<unk>"}
__lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase ) )
def lowerCamelCase__ ( self : int , **UpperCAmelCase : Any ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : Dict , **UpperCAmelCase : Any ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase )
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Dict ):
__lowerCamelCase : int = "lower newer"
__lowerCamelCase : Any = "lower newer"
return input_text, output_text
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Optional[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCamelCase : List[Any] = "lower newer"
__lowerCamelCase : Optional[int] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
__lowerCamelCase : int = tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
__lowerCamelCase : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
def lowerCamelCase__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
__lowerCamelCase : Dict = self.get_tokenizer()
__lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase )
__lowerCamelCase : Tuple = "lower newer"
# Testing tokenization
__lowerCamelCase : str = tokenizer.tokenize(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids without special tokens
__lowerCamelCase : Dict = tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase , add_prefix_space=UpperCAmelCase )
__lowerCamelCase : int = rust_tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing conversion to ids with special tokens
__lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = tokenizer.encode(UpperCAmelCase , add_prefix_space=UpperCAmelCase )
__lowerCamelCase : int = rust_tokenizer.encode(UpperCAmelCase )
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
# Testing the unknown token
__lowerCamelCase : Optional[Any] = tokens + [rust_tokenizer.unk_token]
__lowerCamelCase : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , *UpperCAmelCase : Dict , **UpperCAmelCase : Tuple ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase__ ( self : Any , UpperCAmelCase : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# Simple input
__lowerCamelCase : Tuple = "This is a simple input"
__lowerCamelCase : int = ["This is a simple input 1", "This is a simple input 2"]
__lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
__lowerCamelCase : List[str] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Simple input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(UpperCAmelCase , tokenizer_r.encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" )
# Pair input
self.assertRaises(
UpperCAmelCase , tokenizer_r.batch_encode_plus , UpperCAmelCase , max_length=UpperCAmelCase , padding="max_length" , )
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
__lowerCamelCase : Any = "This is a simple input"
__lowerCamelCase : Optional[Any] = ["This is a simple input looooooooong", "This is a simple input"]
__lowerCamelCase : List[Any] = ("This is a simple input", "This is a pair")
__lowerCamelCase : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
__lowerCamelCase : Any = tokenizer.pad_token_id
__lowerCamelCase : Dict = tokenizer(UpperCAmelCase , padding="max_length" , max_length=30 , return_tensors="np" )
__lowerCamelCase : str = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors="np" )
__lowerCamelCase : Any = tokenizer(*UpperCAmelCase , padding="max_length" , max_length=60 , return_tensors="np" )
__lowerCamelCase : str = tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncate=UpperCAmelCase , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : Dict = "$$$"
__lowerCamelCase : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=UpperCAmelCase , add_bos_token=UpperCAmelCase )
__lowerCamelCase : List[str] = "This is a simple input"
__lowerCamelCase : str = ["This is a simple input 1", "This is a simple input 2"]
__lowerCamelCase : Dict = tokenizer.bos_token_id
__lowerCamelCase : List[str] = tokenizer(UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = tokenizer(UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCamelCase : Optional[int] = tokenizer.decode(out_s.input_ids )
__lowerCamelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : List[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
__lowerCamelCase : int = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
__lowerCamelCase : str = "\nif len_a > len_b: result = a\nelse: result = b"
__lowerCamelCase : List[str] = tokenizer.encode(UpperCAmelCase )
__lowerCamelCase : Dict = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
__lowerCamelCase : str = tokenizer.decode(UpperCAmelCase , truncate_before_pattern=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
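    # Sketch of the decode behaviour exercised above: `truncate_before_pattern`
    # cuts the decoded completion at the first regex match (here a comment
    # marker, an EOS token, a docstring opener, or a blank-line run), which is
    # how CodeGen completions are trimmed back to a single function.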
def lowerCamelCase__ ( self : List[Any] ):
pass | 64 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
class _snake_case ( a__ ):
snake_case__ = "bert-generation"
def __init__( self : Optional[int] , UpperCAmelCase : Dict=50358 , UpperCAmelCase : int=1024 , UpperCAmelCase : Optional[int]=24 , UpperCAmelCase : str=16 , UpperCAmelCase : str=4096 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : str=0.1 , UpperCAmelCase : Tuple=0.1 , UpperCAmelCase : Union[str, Any]=512 , UpperCAmelCase : Optional[Any]=0.0_2 , UpperCAmelCase : int=1E-12 , UpperCAmelCase : Tuple=0 , UpperCAmelCase : int=2 , UpperCAmelCase : Optional[int]=1 , UpperCAmelCase : Union[str, Any]="absolute" , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[Any] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : List[str] = intermediate_size
__lowerCamelCase : Tuple = hidden_dropout_prob
__lowerCamelCase : List[str] = attention_probs_dropout_prob
__lowerCamelCase : Optional[Any] = max_position_embeddings
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Union[str, Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : Optional[Any] = use_cache | 64 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_A : List[Any] = logging.get_logger(__name__)
_A : List[str] = {
"""nielsr/canine-s""": 20_48,
}
# Unicode defines 1,114,112 total "codepoints"
_A : List[str] = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
_A : Optional[int] = 0
_A : Union[str, Any] = 0XE_000
_A : Optional[Any] = 0XE_001
_A : str = 0XE_002
_A : List[str] = 0XE_003
_A : str = 0XE_004
# Maps special codepoints to human-readable names.
_A : Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
_A : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
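# CANINE has no learned subword vocabulary: token ids are raw Unicode
# codepoints, and the special tokens above sit in the Basic Multilingual
# Plane's Private Use Area (standalone sketch; the mangled `_A` constants
# correspond to PAD=0, CLS=0xE000, SEP=0xE001, BOS=0xE002, MASK=0xE003,
# RESERVED=0xE004 in the original source):
assert [ord(c) for c in "hi"] == [104, 105]  # tokenization is just ord()
assert 0xE000 <= 0xE004 <= 0xF8FF  # all special codepoints fall in the PUA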
class a__ ( a_ ):
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=False , _a=2_048 , **_a , ):
lowercase : List[str] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
lowercase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
lowercase : int = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
lowercase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
lowercase : Any = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase : List[Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , model_max_length=_a , **_a , )
# Creates a mapping for looking up the IDs of special symbols.
lowercase : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
lowercase : Dict = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
lowercase : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
lowercase : int = UNICODE_VOCAB_SIZE
lowercase : Union[str, Any] = len(self._special_codepoints )
@property
def __magic_name__ ( self ):
return self._unicode_vocab_size
def __magic_name__ ( self , _a ):
return list(_a )
def __magic_name__ ( self , _a ):
try:
return ord(_a )
except TypeError:
raise ValueError(f"""invalid token: '{token}'""" )
def __magic_name__ ( self , _a ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_a )
except TypeError:
raise ValueError(f"""invalid id: {index}""" )
def __magic_name__ ( self , _a ):
return "".join(_a )
def __magic_name__ ( self , _a , _a = None ):
lowercase : Optional[Any] = [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
lowercase : int = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __magic_name__ ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
lowercase : Union[str, Any] = [1] + ([0] * len(_a )) + [1]
if token_ids_a is not None:
result += ([0] * len(_a )) + [1]
return result
def __magic_name__ ( self , _a , _a = None ):
lowercase : List[Any] = [self.sep_token_id]
lowercase : Tuple = [self.cls_token_id]
lowercase : int = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __magic_name__ ( self , _a , _a = None ):
return ()
| 202 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( __snake_case : list[int] ) -> list[int]:
if len(__snake_case ) == 0:
return array
lowercase , lowercase : Tuple = min(__snake_case ), max(__snake_case )
    # Compute the number of holes needed to cover the value range.
    lowercase : Optional[Any] = _max - _min + 1
    lowercase , lowercase : List[str] = [0] * holes_range, [0] * holes_range
    # Count how many times each value occurs.
for i in array:
lowercase : Tuple = i - _min
lowercase : str = i
holes_repeat[index] += 1
    # Rebuild the sorted array from the hole counts.
lowercase : Union[str, Any] = 0
for i in range(__snake_case ):
while holes_repeat[i] > 0:
lowercase : Tuple = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_A : str = input("""Enter numbers separated by comma:\n""")
_A : Optional[Any] = [int(x) for x in user_input.split(""",""")]
print(pigeon_sort(unsorted))
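# Standalone sketch of the pigeonhole idea above, with de-obfuscated names
# (hypothetical helper, not part of the module):
def _pigeonhole_demo(arr: list[int]) -> list[int]:
    lo, hi = min(arr), max(arr)
    counts = [0] * (hi - lo + 1)  # one hole per value in [lo, hi]
    for value in arr:
        counts[value - lo] += 1
    result: list[int] = []
    for offset, count in enumerate(counts):
        result.extend([lo + offset] * count)
    return result

assert _pigeonhole_demo([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]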
| 202 | 1 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( a__ , a__ ):
"""simple docstring"""
assert isinstance(a__ , a__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_text_dataset(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , features=a__ , cache_dir=a__ ).read()
_check_text_dataset(a__ , a__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ , split=a__ ).read()
_check_text_dataset(a__ , a__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if issubclass(a__ , a__ ):
__SCREAMING_SNAKE_CASE = text_path
elif issubclass(a__ , a__ ):
__SCREAMING_SNAKE_CASE = [text_path]
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ ).read()
_check_text_dataset(a__ , a__ )
def a__ ( a__ , a__ , a__=("train",) ):
"""simple docstring"""
assert isinstance(a__ , a__ )
for split in splits:
__SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = TextDatasetReader({"""train""": text_path} , cache_dir=a__ , keep_in_memory=a__ ).read()
_check_text_datasetdict(a__ , a__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
] , )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
    # The text loader always exposes a single "text" column of dtype string
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(a__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = TextDatasetReader({"""train""": text_path} , features=a__ , cache_dir=a__ ).read()
_check_text_datasetdict(a__ , a__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if split:
__SCREAMING_SNAKE_CASE = {split: text_path}
else:
__SCREAMING_SNAKE_CASE = """train"""
__SCREAMING_SNAKE_CASE = {"""train""": text_path, """test""": text_path}
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""text""": """string"""}
__SCREAMING_SNAKE_CASE = TextDatasetReader(a__ , cache_dir=a__ ).read()
_check_text_datasetdict(a__ , a__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
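# Note (sketch): the `text_path` fixture above is supplied by the datasets
# test-suite conftest and points at a four-line UTF-8 text file, which is why
# every helper asserts num_rows == 4 and a single "text" column.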
| 331 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
UpperCAmelCase : Dict = TypeVar('T')
def a__ ( a__ ):
"""simple docstring"""
return (position - 1) // 2
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 1
def a__ ( a__ ):
"""simple docstring"""
return (2 * position) + 2
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __len__( self : Optional[Any] ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase__ ( self : Tuple ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
__SCREAMING_SNAKE_CASE = self.elements
self.elements += 1
self._bubble_up(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0]
self._bubble_down(__SCREAMING_SNAKE_CASE )
return elem
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE = (elem, weight)
if position > 0:
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
self._bubble_down(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
if curr_pos == 0:
return None
__SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_up(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE = get_child_left_position(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = get_child_right_position(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return self._bubble_down(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__SCREAMING_SNAKE_CASE = nodea_pos
__SCREAMING_SNAKE_CASE = nodea_pos
class lowerCAmelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __repr__( self : Dict ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self : Dict ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
"""simple docstring"""
if node not in self.connections:
__SCREAMING_SNAKE_CASE = {}
self.nodes += 1
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
"""simple docstring"""
self.add_node(__SCREAMING_SNAKE_CASE )
self.add_node(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = weight
__SCREAMING_SNAKE_CASE = weight
def a__ ( a__ , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
__SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
__SCREAMING_SNAKE_CASE = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
__SCREAMING_SNAKE_CASE = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
    # Run Prim's algorithm on the remaining nodes in the queue.
while not priority_queue.is_empty():
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
return dist, parent
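# Hypothetical usage sketch with the de-obfuscated names from the original
# source (GraphUndirectedWeighted / prims_algo); the mangled identifiers above
# shadow each other, so this is illustrative only:
#
# graph = GraphUndirectedWeighted[int]()
# graph.add_edge(1, 2, 3)
# graph.add_edge(2, 3, 10)
# graph.add_edge(1, 3, 15)
# dist, parent = prims_algo(graph)
# assert parent[2] == 1 and parent[3] == 2  # MST edges: 1-2 (w=3), 2-3 (w=10)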
| 331 | 1 |
'''simple docstring'''
from ....utils import logging
a__ : Optional[Any] = logging.get_logger(__name__)
class lowercase_ ( a__ ):
def __init__( self , a , a=None , a=20_48 ):
UpperCamelCase__ = config.__dict__
UpperCamelCase__ = modal_hidden_size
if num_labels:
UpperCamelCase__ = num_labels
| 80 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = CLIPTokenizer
snake_case__ : Dict = CLIPTokenizerFast
snake_case__ : List[Any] = True
snake_case__ : Optional[Any] = {}
snake_case__ : Dict = False
def UpperCAmelCase_ ( self : Any ) -> Any:
super().setUp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
__SCREAMING_SNAKE_CASE = {"unk_token": "<unk>"}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] , **UpperCAmelCase__ : Tuple ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Any , **UpperCAmelCase__ : Optional[Any] ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[str]:
__SCREAMING_SNAKE_CASE = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__SCREAMING_SNAKE_CASE = "lower newer"
__SCREAMING_SNAKE_CASE = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokens + [tokenizer.unk_token]
__SCREAMING_SNAKE_CASE = [1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
@require_ftfy
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
__SCREAMING_SNAKE_CASE = tokenizer_s.tokenize(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer_r.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__SCREAMING_SNAKE_CASE = "xa\u0303y" + " " + "x\xe3y"
__SCREAMING_SNAKE_CASE = tokenizer_s.tokenize(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer_r.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that the tokenization is identical on unicode of space type
__SCREAMING_SNAKE_CASE = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__SCREAMING_SNAKE_CASE = tokenizer_s.tokenize(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer_r.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Test that the tokenization is identical on unicode of line break type
__SCREAMING_SNAKE_CASE = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__SCREAMING_SNAKE_CASE = tokenizer_s.tokenize(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer_r.tokenize(UpperCAmelCase__ )
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
__SCREAMING_SNAKE_CASE = F"""{text_of_1_token} {text_of_1_token}"""
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__SCREAMING_SNAKE_CASE = F""" {text}"""
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ) + 1, 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
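    # Offset arithmetic exercised above (sketch): without a leading space,
    # "hello hello" maps to offsets (0, 5) and (6, 11); with " hello hello"
    # everything shifts right by one, giving (1, 6) and (7, 12).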
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(UpperCAmelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
super().test_tokenization_python_rust_equals()
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
        # CLIP always lowercases letters
pass
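# Standalone sketch of the unicode-equivalence check above: the decomposed
# form "a" + U+0303 (combining tilde) NFC-normalizes to the precomposed
# U+00E3, so both spellings should tokenize identically.
import unicodedata

assert unicodedata.normalize("NFC", "xa\u0303y") == "x\xe3y"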
| 54 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
__A = '''deta'''
__A = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Tuple , lowercase_ : int=None , lowercase_ : Union[str, Any]=900 , lowercase_ : Any=2048 , lowercase_ : Optional[int]=6 , lowercase_ : Optional[int]=2048 , lowercase_ : List[Any]=8 , lowercase_ : Union[str, Any]=6 , lowercase_ : Optional[Any]=1024 , lowercase_ : Dict=8 , lowercase_ : Any=0.0 , lowercase_ : str=True , lowercase_ : List[Any]="relu" , lowercase_ : Optional[int]=256 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=0.0 , lowercase_ : Dict=0.02 , lowercase_ : List[str]=1.0 , lowercase_ : List[str]=True , lowercase_ : Any=False , lowercase_ : int="sine" , lowercase_ : str=5 , lowercase_ : int=4 , lowercase_ : Any=4 , lowercase_ : Tuple=True , lowercase_ : List[Any]=300 , lowercase_ : Tuple=True , lowercase_ : Any=True , lowercase_ : str=1 , lowercase_ : List[str]=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : Tuple=1 , lowercase_ : int=1 , lowercase_ : Tuple=5 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=0.1 , lowercase_ : List[Any]=0.25 , **lowercase_ : Any , ) -> List[str]:
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_UpperCamelCase = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
else:
if isinstance(lowercase_ , lowercase_):
_UpperCamelCase = backbone_config.pop("model_type")
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(lowercase_)
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True.")
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=lowercase_ , **lowercase_)
@property
def __UpperCAmelCase ( self : List[str]) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def __UpperCAmelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
return self.d_model
def __UpperCAmelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCamelCase = copy.deepcopy(self.__dict__)
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
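# Hypothetical usage sketch of the two-stage constraint validated in
# __init__ above (two_stage=True requires with_box_refine=True):
#
# from transformers import DetaConfig
#
# DetaConfig(two_stage=True, with_box_refine=True)   # ok
# DetaConfig(two_stage=True, with_box_refine=False)  # raises ValueError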
| 362 | from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[str] , lowercase_ : Optional[NestedDataStructureLike[PathLike]] = None , lowercase_ : Optional[NamedSplit] = None , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Dict , ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = path_or_paths
_UpperCamelCase = split if split or isinstance(lowercase_ , lowercase_) else "train"
_UpperCamelCase = features
_UpperCamelCase = cache_dir
_UpperCamelCase = keep_in_memory
_UpperCamelCase = streaming
_UpperCamelCase = num_proc
_UpperCamelCase = kwargs
@abstractmethod
def __UpperCAmelCase ( self : Any) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
"""simple docstring"""
pass
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Optional[Features] = None , lowercase_ : str = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : Optional[int] = None , **lowercase_ : Union[str, Any] , ) -> str:
"""simple docstring"""
_UpperCamelCase = features
_UpperCamelCase = cache_dir
_UpperCamelCase = keep_in_memory
_UpperCamelCase = streaming
_UpperCamelCase = num_proc
_UpperCamelCase = kwargs
@abstractmethod
def __UpperCAmelCase ( self : Any) -> Union[Dataset, IterableDataset]:
"""simple docstring"""
pass
| 63 | 0 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
A__: Tuple = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class A__ ( _lowerCAmelCase , unittest.TestCase ):
__UpperCamelCase : Optional[int] = GPTSwaTokenizer
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : List[Any] = True
__UpperCamelCase : Dict = False
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] =GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self :str , SCREAMING_SNAKE_CASE :Optional[int] ) -> str:
'''simple docstring'''
_a : Optional[Any] ="""This is a test"""
_a : List[Any] ="""This is a test"""
return input_text, output_text
def __UpperCAmelCase ( self :str ) -> Optional[Any]:
'''simple docstring'''
_a : Dict ="""<s>"""
_a : Union[str, Any] =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def __UpperCAmelCase ( self :Optional[Any] ) -> int:
'''simple docstring'''
_a : Optional[int] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 2_0_0_0 )
def __UpperCAmelCase ( self :Dict ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : str =GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ )
_a : Tuple =tokenizer.tokenize("""This is a test""" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
_a : Union[str, Any] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
_a : int =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
_a : Any =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
# fmt: off
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def __UpperCAmelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_a : List[Any] =GPTSwaTokenizer(SCREAMING_SNAKE_CASE_ )
_a : Union[str, Any] =["""This is a test""", """I was born in 92000, and this is falsé."""]
_a : Any =[
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertListEqual(tokenizer.encode_fast(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Test that decode_fast returns the input text
for text, token_ids in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(tokenizer.decode_fast(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
@slow
def __UpperCAmelCase ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
_a : List[Any] =[
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
_a : Any ={"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=SCREAMING_SNAKE_CASE_ , )
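# Standalone sketch of the byte-fallback pieces asserted above: characters
# missing from the SentencePiece vocab are emitted as UTF-8 byte tokens,
# hence "é" -> "<0xC3>", "<0xA9>" and "9" -> "<0x39>".
assert "é".encode("utf-8") == b"\xc3\xa9"
assert "9".encode("utf-8") == b"\x39"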
| 276 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Optional[Any] = '▁'
a : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
a : Optional[Any] = {
'vocab_file': {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model',
}
}
a : Any = {
'facebook/xglm-564M': 2_048,
}
class _a ( _lowerCAmelCase ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ['''input_ids''', '''attention_mask''']
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> None:
UpperCAmelCase_: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
UpperCAmelCase_: Optional[int] = 7
UpperCAmelCase_: Dict = [f'<madeupword{i}>' for i in range(self.num_madeup_words )]
UpperCAmelCase_: List[Any] = kwargs.get("""additional_special_tokens""", [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_, eos_token=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, sp_model_kwargs=self.sp_model_kwargs, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase_: Dict = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase_: Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
UpperCAmelCase_: Union[str, Any] = len(self.sp_model )
UpperCAmelCase_: Optional[int] = {f'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ) -> Any:
UpperCAmelCase_: List[Any] = self.__dict__.copy()
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCAmelCase_: List[Any] = d
# for backward compatibility
if not hasattr(self, """sp_model_kwargs""" ):
UpperCAmelCase_: int = {}
UpperCAmelCase_: List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> list:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        # pair layout mirrors fairseq: <sep> A <sep> <sep> B
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> list:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> list:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> list:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
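
# A minimal, self-contained sketch of the special-token layout the helpers above
# produce; sep_token_id == 2 is assumed purely for illustration (the real id
# comes from the tokenizer's vocabulary).
def _demo_build_inputs(ids_a, ids_b=None, sep_id=2):
    if ids_b is None:
        return [sep_id] + ids_a
    return [sep_id] + ids_a + [sep_id, sep_id] + ids_b

assert _demo_build_inputs([10, 11]) == [2, 10, 11]
assert _demo_build_inputs([10, 11], [20]) == [2, 10, 11, 2, 2, 20]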
| 147 | 0 |
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        # Manhattan distance to the goal
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        # returns the in-grid, obstacle-free neighbours of `parent`
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        # walk parent pointers back to the start node
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        # path entries are (y, x) tuples; mark them on the grid
        for pos_y, pos_x in path:
            grid[pos_y][pos_x] = 2

        for elem in grid:
            print(elem)
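
# A quick, runnable check of the Manhattan heuristic that drives the ordering
# above (illustrative; mirrors Node.calculate_heuristic).
def _manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

assert _manhattan((0, 0), (6, 6)) == 12  # start-to-goal distance on the 7x7 grid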
| 213 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
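
# Hedged usage sketch of the pipeline classes above; "t5-small" is just one
# seq2seq checkpoint that fits the mapping (weights download on first run),
# and any other seq2seq model would work the same way.
from transformers import pipeline

generator = pipeline("text2text-generation", model="t5-small")
print(generator("translate English to German: Hello, world!"))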
| 213 | 1 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
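
# Runnable mini-example of the metric-prefixing step used by evaluate()/predict()
# above: every key gains the metric_key_prefix exactly once.
def _prefix_metrics(metrics: dict, prefix: str) -> dict:
    for key in list(metrics.keys()):
        if not key.startswith(f"{prefix}_"):
            metrics[f"{prefix}_{key}"] = metrics.pop(key)
    return metrics

assert _prefix_metrics({"f1": 88.1, "eval_loss": 0.4}, "eval") == {"eval_loss": 0.4, "eval_f1": 88.1}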
| 110 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        # recursively sum this node's value and both subtrees
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
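
# Small runnable check of the summing traversal above: the three-node tree
#   1
#  / \
# 2   3
# sums to 6.
_root = Node(1)
_root.left = Node(2)
_root.right = Node(3)
assert next(iter(BinaryTreeNodeSum(_root))) == 6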
| 110 | 1 |
"""simple docstring"""
import os
from math import log10


def solution(base_exp_file: str = "base_exp.txt") -> int:
    # Project Euler 99: find the line whose base**exponent is largest, comparing
    # a**x via x * log10(a) instead of computing the huge powers directly.
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), base_exp_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
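
# The log trick above in isolation: comparing a**x against b**y via
# x*log10(a) vs y*log10(b) avoids computing astronomically large powers.
from math import log10 as _log10

assert 7 * _log10(3) > 11 * _log10(2)  # 3**7 == 2187 beats 2**11 == 2048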
| 369 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    # The Liouville function: 1 if `number` has an even count of prime factors
    # (with multiplicity), -1 otherwise.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
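
# Runnable illustration of the parity rule above, with a local prime-factor
# count so the snippet does not depend on the maths package:
def _omega(n: int) -> int:
    count, p = 0, 2
    while p * p <= n:
        while n % p == 0:
            n //= p
            count += 1
        p += 1
    return count + (1 if n > 1 else 0)

assert [-1 if _omega(n) % 2 else 1 for n in (1, 2, 3, 4)] == [1, -1, -1, 1]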
| 175 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
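
# Hedged usage sketch: instantiating the config above through the public API
# (DeiTConfig ships with transformers; the overrides shown are arbitrary).
from transformers import DeiTConfig

config = DeiTConfig(image_size=384, patch_size=32)
print(config.num_hidden_layers)  # 12, the default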
| 32 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
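
# Hedged usage sketch of the behavior pinned down by the tests above; the tiny
# checkpoint is real, but the printed scores are model-dependent.
from transformers import pipeline

clf = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
print(clf("This is great !"))              # [{'label': ..., 'score': ...}]
print(clf("This is great !", top_k=None))  # one entry per label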
| 32 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
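
# Quick check of the property above: the default conv strides downsample raw
# audio by 5*2*2*2*2*2*2 == 320 samples per output frame.
import functools as _functools
import operator as _operator

assert _functools.reduce(_operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320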
| 158 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"


@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"

    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")

    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])

    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents

    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb 25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )

    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
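
# Minimal, runnable sketch of the argv-patching trick run_trainer uses for the
# non-distributed path; _fake_main stands in for run_translation.main.
import sys as _sys
from unittest.mock import patch as _patch

def _fake_main():
    print(_sys.argv)  # the script sees the patched argv

_testargs = ["run_translation.py", "--model_name_or_path", "sshleifer/tiny-mbart", "--do_train"]
with _patch.object(_sys, "argv", _testargs):
    _fake_main()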
| 158 | 1 |