# Dataset row dump. Columns: code (string) | code_codestyle (int, 0-721) |
# style_context (string) | style_context_codestyle (int, 0-699) | label (int, 0-1)
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
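# A minimal usage sketch (not part of the original file; the checkpoint id and the
# companion OwlViTForObjectDetection class are assumptions based on the standard
# transformers API):
#
# from transformers import OwlViTProcessor, OwlViTForObjectDetection
#
# processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
# model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
# inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# outputs = model(**inputs)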
# --- style_context sample of this row (code_codestyle = 675) ---
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    >>> maximum_non_adjacent_sum([499, 500, -3, -7, -2, -2, -6])
    500
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
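# The loop above is the standard "house robber" recurrence, traced here on
# nums = [2, 7, 9, 3, 1] (illustrative): the pair (including, excluding) evolves
# (2, 0) -> (7, 2) -> (11, 7) -> (10, 11) -> (12, 11), so the answer is 12,
# achieved by picking the non-adjacent elements 2 + 9 + 1.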
# --- end of dataset row (style_context_codestyle = 675, label = 1) ---
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
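# Usage sketch outside the test suite (hedged: assumes the signature exercised
# above, where `e`/`d` are the number of student encoder/decoder layers and the
# remaining return values are the ids of the copied teacher layers):
#
# student, *copied_layer_ids = create_student_by_copying_alternating_layers(
#     "sshleifer/bart-tiny-random", "student_dir", e=1, d=1
# )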
# --- style_context sample of this row (code_codestyle = 675) ---
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
# --- end of dataset row (style_context_codestyle = 675, label = 1) ---
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# --- style_context sample of this row (code_codestyle = 675) ---
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
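# A minimal usage sketch (assumptions: this class ships in diffusers as
# `DiTPipeline` and "facebook/DiT-XL-2-256" is a compatible checkpoint):
#
# from diffusers import DiTPipeline
#
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# class_ids = pipe.get_label_ids(["golden retriever"])
# image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]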
# --- end of dataset row (style_context_codestyle = 675, label = 1) ---
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting each char that sits in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
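# Illustrative check of the scoring above: evaluate("Helxo Worlx", "Hello World")
# returns ("Helxo Worlx", 9.0), because nine characters already sit in the right
# position; basic() below later normalizes this score by the target length.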
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select a second parent and generate children from the pair."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Evolve random strings until one matches the target exactly."""
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
# --- style_context sample of this row (code_codestyle = 675) ---
import json
import os
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ImageGPTImageProcessor


class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
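# Note on the two-cluster fixture above (illustrative): ImageGPT color-quantizes
# each normalized RGB pixel to the id of its nearest cluster centroid, so with
# the two centroids in prepare_image_processor_dict() every pixel maps to 0 or 1,
# and input_ids is the flattened sequence of those ids; at the 32x32 resolution
# of "openai/imagegpt-small" that gives the 1024 tokens checked in test_image.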
# --- end of dataset row (style_context_codestyle = 675, label = 1) ---
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
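# Usage sketch (hypothetical test module; fixture names as defined above).
# pytest injects fixtures by argument name, so a test can request a freshly
# created private repo plus the CI token:
#
# def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
#     ds = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)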
# --- style_context sample of this row (code_codestyle = 675) ---
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped for the given location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
# --- end of dataset row (style_context_codestyle = 675, label = 1) ---
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
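# Worked example (illustrative): with the default movq scale factor of 8,
# downscale_height_and_width(512, 512) returns (64, 64): 512 // 8**2 = 8 latent
# tiles per side, re-multiplied by 8. Sizes that do not divide evenly are
# rounded up to a full tile, e.g. downscale_height_and_width(530, 530) -> (72, 72).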
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
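A minimal standalone sketch of the classifier-free guidance arithmetic used in the denoising loop above; the tensor shapes and the scale value are illustrative stand-ins, not values taken from the pipeline.

import torch

# Stand-ins for the two halves of the batched UNet prediction.
noise_pred_uncond = torch.randn(1, 4, 64, 64)
noise_pred_text = torch.randn(1, 4, 64, 64)
guidance_scale = 4.0  # same default as the __call__ signature above

# Push the estimate away from the unconditional prediction, toward the conditioned one.
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == noise_pred_text.shape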
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
_lowerCAmelCase : str = field(
default=_UpperCamelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_UpperCamelCase )} )
_lowerCAmelCase : str = field(
default=_UpperCamelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowerCAmelCase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowerCAmelCase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowerCAmelCase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowerCAmelCase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowerCAmelCase : bool = field(
default=_UpperCamelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowerCAmelCase : bool = field(
default=_UpperCamelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowerCAmelCase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowerCAmelCase : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowerCAmelCase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowerCAmelCase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = '''train'''
_lowerCAmelCase : List[Any] = '''dev'''
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : SquadDataTrainingArguments
_lowerCAmelCase : List[SquadFeatures]
_lowerCAmelCase : Split
_lowerCAmelCase : bool
def __init__( self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = Split.train , lowercase__ = False , lowercase__ = None , lowercase__ = "pt" , ):
__UpperCAmelCase : Union[str, Any] = args
__UpperCAmelCase : List[str] = is_language_sensitive
        __UpperCAmelCase : Dict = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()  # the v1/v2 processor names collapse into one under this naming scheme
if isinstance(lowercase__ , lowercase__):
try:
__UpperCAmelCase : str = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''')
__UpperCAmelCase : int = mode
# Load data features from cache or dataset file
__UpperCAmelCase : Tuple = '''v2''' if args.version_2_with_negative else '''v1'''
__UpperCAmelCase : Any = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCAmelCase : int = cached_features_file + '''.lock'''
with FileLock(lowercase__):
if os.path.exists(lowercase__) and not args.overwrite_cache:
__UpperCAmelCase : Any = time.time()
__UpperCAmelCase : Tuple = torch.load(lowercase__)
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__UpperCAmelCase : Any = self.old_features['''features''']
__UpperCAmelCase : Any = self.old_features.get('''dataset''' , lowercase__)
__UpperCAmelCase : List[Any] = self.old_features.get('''examples''' , lowercase__)
logger.info(
F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start)
if self.dataset is None or self.examples is None:
logger.warning(
F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                    ''' a future run''')
else:
if mode == Split.dev:
__UpperCAmelCase : List[Any] = self.processor.get_dev_examples(args.data_dir)
else:
__UpperCAmelCase : Dict = self.processor.get_train_examples(args.data_dir)
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = squad_convert_examples_to_features(
examples=self.examples , tokenizer=lowercase__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowercase__ , )
__UpperCAmelCase : Dict = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , lowercase__ , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self):
return len(self.features)
def __getitem__( self , lowercase__):
# Convert to Tensors and build dataset
__UpperCAmelCase : List[Any] = self.features[i]
__UpperCAmelCase : Tuple = torch.tensor(feature.input_ids , dtype=torch.long)
__UpperCAmelCase : List[Any] = torch.tensor(feature.attention_mask , dtype=torch.long)
__UpperCAmelCase : Tuple = torch.tensor(feature.token_type_ids , dtype=torch.long)
__UpperCAmelCase : Tuple = torch.tensor(feature.cls_index , dtype=torch.long)
__UpperCAmelCase : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float)
__UpperCAmelCase : Any = torch.tensor(feature.is_impossible , dtype=torch.float)
__UpperCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask})
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible})
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa) * self.args.lang_id)})
if self.mode == Split.train:
__UpperCAmelCase : Optional[Any] = torch.tensor(feature.start_position , dtype=torch.long)
__UpperCAmelCase : List[Any] = torch.tensor(feature.end_position , dtype=torch.long)
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions})
return inputs
| 675 |
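The dataset class above guards its feature cache with a file lock so that, in distributed training, only the first process builds the cache while the others block and then read it. A minimal sketch of that pattern with a placeholder payload (the cache path is hypothetical):

import os
import tempfile

import torch
from filelock import FileLock

cache_file = os.path.join(tempfile.gettempdir(), "squad_features_demo.pt")
with FileLock(cache_file + ".lock"):  # other processes wait here until the cache exists
    if os.path.exists(cache_file):
        payload = torch.load(cache_file)
    else:
        payload = {"features": list(range(3))}  # placeholder for real SquadFeatures
        torch.save(payload, cache_file)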
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
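A usage sketch of the helper under test, inferred from the assertions above rather than from an independent API description; it needs network access to pull the tiny teacher checkpoint from the Hub.

import tempfile

from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random",  # teacher checkpoint, as in the tests above
    tempfile.mkdtemp(),            # directory where the student is saved
    e=1,                           # encoder layers to keep
    d=1,                           # decoder layers to keep
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1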
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCAmelCase : str = getattr(lowercase_ , lowercase_ )
if weight_type is not None:
__UpperCAmelCase : List[Any] = getattr(lowercase_ , lowercase_ ).shape
else:
__UpperCAmelCase : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
__UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
__UpperCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__UpperCAmelCase : Any = value
elif weight_type == "bias":
__UpperCAmelCase : int = value
else:
__UpperCAmelCase : Tuple = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Optional[int] = fairseq_model.state_dict()
__UpperCAmelCase : str = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
__UpperCAmelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : Any = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
__UpperCAmelCase : int = name.split(lowercase_ )[0].split('''.''' )[-2]
__UpperCAmelCase : Tuple = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
__UpperCAmelCase : Union[str, Any] = '''weight_g'''
elif "weight_v" in name:
__UpperCAmelCase : Optional[Any] = '''weight_v'''
elif "weight" in name:
__UpperCAmelCase : Any = '''weight'''
elif "bias" in name:
__UpperCAmelCase : str = '''bias'''
else:
__UpperCAmelCase : str = None
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
continue
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = full_name.split('''conv_layers.''' )[-1]
__UpperCAmelCase : int = name.split('''.''' )
__UpperCAmelCase : Dict = int(items[0] )
__UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__UpperCAmelCase : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__UpperCAmelCase : int = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__UpperCAmelCase : Optional[int] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__UpperCAmelCase : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = SEWConfig()
if is_finetuned:
__UpperCAmelCase : str = model.wav_encoder.wav_model.cfg
else:
__UpperCAmelCase : Optional[int] = model.cfg
__UpperCAmelCase : Optional[int] = fs_config.conv_bias
__UpperCAmelCase : List[str] = eval(fs_config.conv_feature_layers )
__UpperCAmelCase : Dict = [x[0] for x in conv_layers]
__UpperCAmelCase : Optional[Any] = [x[1] for x in conv_layers]
__UpperCAmelCase : Any = [x[2] for x in conv_layers]
__UpperCAmelCase : Dict = '''gelu'''
__UpperCAmelCase : Union[str, Any] = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
__UpperCAmelCase : Optional[int] = 0.0
__UpperCAmelCase : Union[str, Any] = fs_config.activation_fn.name
__UpperCAmelCase : Union[str, Any] = fs_config.encoder_embed_dim
__UpperCAmelCase : Optional[int] = 0.0_2
__UpperCAmelCase : Optional[Any] = fs_config.encoder_ffn_embed_dim
__UpperCAmelCase : Any = 1e-5
__UpperCAmelCase : int = fs_config.encoder_layerdrop
__UpperCAmelCase : Optional[int] = fs_config.encoder_attention_heads
__UpperCAmelCase : Optional[Any] = fs_config.conv_pos_groups
__UpperCAmelCase : int = fs_config.conv_pos
__UpperCAmelCase : Optional[Any] = len(lowercase_ )
__UpperCAmelCase : int = fs_config.encoder_layers
__UpperCAmelCase : int = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
__UpperCAmelCase : str = model.cfg
__UpperCAmelCase : Any = fs_config.final_dropout
__UpperCAmelCase : List[Any] = fs_config.layerdrop
__UpperCAmelCase : Optional[Any] = fs_config.activation_dropout
__UpperCAmelCase : Optional[int] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
__UpperCAmelCase : Any = fs_config.attention_dropout
__UpperCAmelCase : List[Any] = fs_config.dropout_input
__UpperCAmelCase : List[Any] = fs_config.dropout
__UpperCAmelCase : List[str] = fs_config.mask_channel_length
__UpperCAmelCase : Tuple = fs_config.mask_channel_prob
__UpperCAmelCase : str = fs_config.mask_length
__UpperCAmelCase : Optional[int] = fs_config.mask_prob
__UpperCAmelCase : Tuple = '''Wav2Vec2FeatureExtractor'''
__UpperCAmelCase : List[str] = '''Wav2Vec2CTCTokenizer'''
return config
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True ) -> Optional[Any]:
'''simple docstring'''
if is_finetuned:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
__UpperCAmelCase : Optional[int] = SEWConfig.from_pretrained(lowercase_ )
else:
__UpperCAmelCase : Tuple = convert_config(model[0] , lowercase_ )
__UpperCAmelCase : int = model[0].eval()
__UpperCAmelCase : str = True if config.feat_extract_norm == '''layer''' else False
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
if is_finetuned:
if dict_path:
__UpperCAmelCase : Any = Dictionary.load(lowercase_ )
            # Important: overwrite the pad & bos token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq
__UpperCAmelCase : Tuple = target_dict.pad_index
__UpperCAmelCase : Tuple = target_dict.bos_index
__UpperCAmelCase : Any = target_dict.pad_index
__UpperCAmelCase : List[str] = target_dict.bos_index
__UpperCAmelCase : Any = target_dict.eos_index
__UpperCAmelCase : Any = len(target_dict.symbols )
__UpperCAmelCase : Dict = os.path.join(lowercase_ , '''vocab.json''' )
if not os.path.isdir(lowercase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , lowercase_ )
__UpperCAmelCase : int = WavaVecaCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , )
__UpperCAmelCase : Optional[int] = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
__UpperCAmelCase : List[Any] = SEWForCTC(lowercase_ )
else:
__UpperCAmelCase : Any = SEWModel(lowercase_ )
feature_extractor.save_pretrained(lowercase_ )
recursively_load_weights(lowercase_ , lowercase_ , lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
lowerCAmelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 675 |
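A stripped-down sketch of the wildcard substitution inside recursively_load_weights: the layer index is recovered from the fairseq key and spliced into the `*` slot of the HF key. The mapping entry comes from the table above; the example key is hypothetical.

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

name = "encoder.layers.3.self_attn.k_proj.weight"
for key, mapped_key in MAPPING.items():
    if key in name:
        layer_index = name.split(key)[0].split(".")[-2]  # -> "3"
        print(mapped_key.replace("*", layer_index))
        # encoder.layers.3.attention.k_proj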
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 675 | 1 |
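The closing property multiplies all convolutional strides together, which is the overall downsampling factor from raw audio samples to encoder frames. With the default strides from the signature above:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 samples per frame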
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Any = inspect.getfile(accelerate.test_utils)
__UpperCAmelCase : Union[str, Any] = os.path.sep.join(
mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''])
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__UpperCAmelCase : str = test_metrics
@require_cpu
def A( self):
debug_launcher(self.test_metrics.main , num_processes=1)
@require_cpu
def A( self):
debug_launcher(self.test_metrics.main)
@require_single_gpu
def A( self):
self.test_metrics.main()
@require_multi_gpu
def A( self):
print(F"Found {torch.cuda.device_count()} devices.")
__UpperCAmelCase : Optional[int] = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1):
execute_subprocess_async(lowercase__ , env=os.environ.copy())
| 675 |
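debug_launcher lets the CPU tests above exercise multi-process code paths without spawning a real torchrun job. A minimal sketch; the worker function is a hypothetical stand-in for test_metrics.main.

from accelerate import debug_launcher

def worker():
    print("running inside a simulated distributed CPU process")

if __name__ == "__main__":
    debug_launcher(worker, num_processes=2)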
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 1 |
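The config builder shifts every ImageNet label id up by one so that index 0 can hold the extra "background" class predicted by the TensorFlow checkpoints. The same mapping in miniature, with a two-entry stand-in for imagenet-1k-id2label.json:

idalabel = {"0": "tench", "1": "goldfish"}  # tiny stand-in for the real label file
idalabel = {int(k) + 1: v for k, v in idalabel.items()}
idalabel[0] = "background"
print(idalabel)  # {1: 'tench', 2: 'goldfish', 0: 'background'}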
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
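The module above defers importing its heavy submodules until an attribute is first accessed. A tiny illustration of the same idea with plain importlib (the wrapped module here is just json, chosen for the demo):

import importlib

class LazyModule:
    def __init__(self, name):
        self._name = name
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:  # the real import happens only on first access
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

json_lazy = LazyModule("json")        # nothing imported yet
print(json_lazy.dumps({"ok": True}))  # first access triggers the import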
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 1 |
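The single method above deep-copies every field into a fresh instance, so mutating a dict field on the copy cannot leak back into the original. The same pattern in a minimal standalone form (the class and its fields are hypothetical):

import copy
from dataclasses import dataclass, field

@dataclass
class Options:
    name: str = "default"
    extra: dict = field(default_factory=dict)

    def duplicate(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})

a = Options(extra={"k": 1})
b = a.duplicate()
b.extra["k"] = 2
print(a.extra["k"])  # 1 -- the original is untouched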
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : str = 0
@slow
def A( self):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(lowercase__) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
self.assertIsInstance(lowercase__ , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(lowercase__) , 0)
def A( self):
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def A( self):
__UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 2_0)
def A( self):
__UpperCAmelCase : List[str] = AutoConfig.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
# Check that tokenizer_type ≠ model_type
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(lowercase__ , config=lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 1_2)
def A( self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase__ , '''vocab.txt'''))
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''bert''' , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase__ , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase__ , '''merges.txt'''))
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''gpt2''' , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
@require_tokenizers
def A( self):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(lowercase__ , '''vocab.txt'''))
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''bert''')
self.assertIsInstance(lowercase__ , lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(lowercase__ , '''vocab.json'''))
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(lowercase__ , '''merges.txt'''))
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , tokenizer_type='''gpt2''')
self.assertIsInstance(lowercase__ , lowercase__)
def A( self):
with pytest.raises(lowercase__):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''')
@require_tokenizers
def A( self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__UpperCAmelCase : Union[str, Any] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''')
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
if isinstance(lowercase__ , lowercase__):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , lowercase__)
else:
self.assertEqual(tokenizer.do_lower_case , lowercase__)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def A( self):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
lowercase__ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
__UpperCAmelCase : List[str] = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''')
def A( self):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__UpperCAmelCase : Tuple = TOKENIZER_MAPPING.values()
__UpperCAmelCase : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(lowercase__)
@require_tokenizers
def A( self):
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase__) , lowercase__)
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''') , lowercase__)
@require_tokenizers
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=lowercase__)
__UpperCAmelCase : str = '''Hello, world. How are you?'''
__UpperCAmelCase : str = tokenizer.tokenize(lowercase__)
self.assertEqual('''[UNK]''' , tokens[0])
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=lowercase__)
__UpperCAmelCase : List[Any] = tokenizer.tokenize(lowercase__)
self.assertEqual('''[UNK]''' , tokens[0])
@require_tokenizers
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''')
self.assertEqual(type(lowercase__) , lowercase__)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , '''[UNK]''')
self.assertEqual(tokenizer.padding_side , '''right''')
self.assertEqual(tokenizer.truncation_side , '''right''')
def A( self):
__UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def A( self):
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''ctrl''')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(lowercase__ , lowercase__)
def A( self):
# Check we can load the tokenizer config of an online model.
__UpperCAmelCase : Union[str, Any] = get_tokenizer_config('''bert-base-cased''')
__UpperCAmelCase : List[Any] = config.pop('''_commit_hash''' , lowercase__)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(lowercase__ , {'''do_lower_case''': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__UpperCAmelCase : Optional[Any] = get_tokenizer_config(lowercase__)
self.assertDictEqual(lowercase__ , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Any = get_tokenizer_config(lowercase__)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''')
def A( self):
try:
AutoConfig.register('''custom''' , lowercase__)
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase__):
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
__UpperCAmelCase : Dict = CustomTokenizer.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def A( self):
try:
AutoConfig.register('''custom''' , lowercase__)
# Can register in two steps
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
lowercase__ , slow_tokenizer_class=lowercase__ , fast_tokenizer_class=lowercase__)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowercase__):
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
        # We pass through a BertTokenizerFast because there is no slow-to-fast converter
        # for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : List[Any] = BertTokenizerFast.from_pretrained(lowercase__)
bert_tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : str = CustomTokenizerFast.from_pretrained(lowercase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowercase__ , use_fast=lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A( self):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(lowercase__):
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowercase__):
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowercase__ , trust_remote_code=lowercase__)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowercase__)
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(lowercase__ , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''')
@require_tokenizers
def A( self):
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = False
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = NewTokenizer
_lowerCAmelCase : str = False
try:
AutoConfig.register('''custom''' , lowercase__)
AutoTokenizer.register(lowercase__ , slow_tokenizer_class=lowercase__)
AutoTokenizer.register(lowercase__ , fast_tokenizer_class=lowercase__)
# If remote code is not set, the default is to use local
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''')
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertFalse(tokenizer.special_attribute_present)
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
self.assertTrue(tokenizer.special_attribute_present)
__UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def A( self):
__UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''')
# Test we can also load the slow version
__UpperCAmelCase : int = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=lowercase__ , use_fast=lowercase__)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''')
def A( self):
with self.assertRaisesRegex(
lowercase__ , '''bert-base is not a local folder and is not a valid model identifier'''):
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''bert-base''')
def A( self):
with self.assertRaisesRegex(
lowercase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowercase__ , revision='''aaaaaa''')
def A( self):
# Make sure we have cached the tokenizer.
__UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
with RequestCounter() as counter:
__UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
| 675 |
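The registration dance those tests exercise pairs a config class with tokenizer classes so AutoTokenizer can resolve custom model types. A sketch mirroring the calls in the tests; MyConfig and MyTokenizer are hypothetical placeholders for the CustomConfig/CustomTokenizer fixtures.

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyTokenizer(PreTrainedTokenizer):
    pass

AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer.from_pretrained on a checkpoint whose config reports "my-model"
# will now resolve to MyTokenizer; re-registering an existing type raises, as tested above.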
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
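A quick check of the string-based OR above against Python's built-in `|` operator, reusing the obfuscated function name from the snippet:

a, b = 25, 32
assert __SCREAMING_SNAKE_CASE(a, b) == bin(a | b)
print(__SCREAMING_SNAKE_CASE(a, b))  # 0b111001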
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
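Note the scheme encrypts with subtraction and decrypts with addition, which still round-trips because E(p) = (p - k) % 26 and D(c) = (c + k) % 26 are inverses. A self-contained check of that arithmetic on a spaceless message (plain ord math, bypassing the lookup dicts above):

from string import ascii_uppercase

key = "SECRETSECRETSEC"      # key repeated to message length, as generate_key does
message = "THEGERMANATTACK"  # the demo message above, without spaces

encrypted = "".join(ascii_uppercase[(ord(m) - ord(k)) % 26] for m, k in zip(message, key))
decrypted = "".join(ascii_uppercase[(ord(c) + ord(k)) % 26] for c, k in zip(encrypted, key))
assert decrypted == message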
def __SCREAMING_SNAKE_CASE ( lowercase_ = 1000000 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowercase_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 675 |
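This is the classic reduced-proper-fractions count (Project Euler 72): the answer equals the sum of Euler's totient φ(d) for d from 2 up to the limit, which the sieve above accumulates. A brute-force cross-check at a small limit:

from math import gcd

limit = 8
brute = sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)
print(brute)  # 21 -- the sieve above returns the same value for limit=8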
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
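    # by inclusion-exclusion: |pred ∪ label| = |pred| + |label| - |pred ∩ label|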
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = OpenAIGPTTokenizer
_lowerCAmelCase : int = OpenAIGPTTokenizerFast
_lowerCAmelCase : Dict = True
_lowerCAmelCase : List[str] = False
def A( self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase : Union[str, Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__UpperCAmelCase : str = dict(zip(lowercase__ , range(len(lowercase__))))
__UpperCAmelCase : Union[str, Any] = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
__UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''') as fp:
fp.write(json.dumps(lowercase__))
with open(self.merges_file , '''w''') as fp:
fp.write('''\n'''.join(lowercase__))
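    # Note (an addition): the merges file written above lists BPE merge rules in
    # priority order after a "#version" header, so "l o" applies before "lo w",
    # which is what turns the toy vocabulary into tokens such as "low" and "er</w>".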
def A( self , lowercase__):
return "lower newer", "lower newer"
def A( self):
__UpperCAmelCase : Optional[Any] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file)
__UpperCAmelCase : str = '''lower'''
__UpperCAmelCase : Optional[Any] = ['''low''', '''er</w>''']
__UpperCAmelCase : str = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
__UpperCAmelCase : List[str] = tokens + ['''<unk>''']
__UpperCAmelCase : List[Any] = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , lowercase__)
def A( self , lowercase__=1_5):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__)
# Simple input
__UpperCAmelCase : List[str] = '''This is a simple input'''
__UpperCAmelCase : List[Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
__UpperCAmelCase : str = ('''This is a simple input''', '''This is a pair''')
__UpperCAmelCase : str = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='''max_length''')
# Simple input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''')
# Simple input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' , )
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode , lowercase__ , max_length=lowercase__ , padding='''max_length''')
# Pair input
self.assertRaises(lowercase__ , tokenizer_r.encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''')
# Pair input
self.assertRaises(
lowercase__ , tokenizer_r.batch_encode_plus , lowercase__ , max_length=lowercase__ , padding='''max_length''' , )
def A( self):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCamelCase ( _UpperCamelCase ):
pass
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the rolling hash: https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
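# Illustrative sketch (an addition, not original code): the O(1) rolling-hash
# update used above drops the leading character and appends the next one.
# Shown here on a tiny window so the arithmetic is easy to follow.
def _rolling_hash_demo() -> None:
    alphabet_size, modulus = 256, 1_000_003
    text, p_len = "abcd", 3
    # hash("abc"), built the same way as in the search above
    h = 0
    for ch in text[:p_len]:
        h = (ord(ch) + h * alphabet_size) % modulus
    modulus_power = pow(alphabet_size, p_len - 1, modulus)
    # slide the window from "abc" to "bcd" in O(1)
    h = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[p_len])) % modulus
    # recompute hash("bcd") from scratch and compare
    expected = 0
    for ch in text[1 : p_len + 1]:
        expected = (ord(ch) + expected * alphabet_size) % modulus
    assert h == expected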
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
    # Test 1)
    __UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
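# Illustrative self-check (an addition): the property above multiplies the conv
# strides, i.e. the feature encoder's overall audio downsampling factor. With the
# default strides this is 5 * 2**6 == 320 input samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320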
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        __UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
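# Illustrative self-check (an addition, assuming the function is callable as
# `hamming`, the name the main guard below uses): the first Hamming numbers are
# the products 2^i * 3^j * 5^k in ascending order.
def _hamming_self_check() -> None:
    assert hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]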
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCAmelCase : Optional[Any] = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight" )
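        # the fused qkv matrix stacks the projections along dim 0:
        # rows [0, hidden) -> query, [hidden, 2*hidden) -> key, [2*hidden, 3*hidden) -> value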
__UpperCAmelCase : List[Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCAmelCase : int = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCAmelCase : Dict = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = dct.pop(lowercase_ )
__UpperCAmelCase : List[Any] = val
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if "handwritten" in checkpoint_url:
__UpperCAmelCase : Dict = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase : List[str] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__UpperCAmelCase : str = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : int = ViTConfig(image_size=384 , qkv_bias=lowercase_ )
__UpperCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCAmelCase : List[Any] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCAmelCase : Optional[int] = 1024
__UpperCAmelCase : Union[str, Any] = 4096
__UpperCAmelCase : Dict = 24
__UpperCAmelCase : str = 16
__UpperCAmelCase : Dict = 1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[Any] = '''relu'''
__UpperCAmelCase : Tuple = 1024
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Dict = False
__UpperCAmelCase : str = False
# load HuggingFace model
__UpperCAmelCase : Union[str, Any] = ViTModel(lowercase_ , add_pooling_layer=lowercase_ )
__UpperCAmelCase : str = TrOCRForCausalLM(lowercase_ )
__UpperCAmelCase : Dict = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
# load state_dict of original model, rename some keys
__UpperCAmelCase : str = torch.hub.load_state_dict_from_url(lowercase_ , map_location='''cpu''' , check_hash=lowercase_ )['''model''']
__UpperCAmelCase : List[Any] = create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCAmelCase : Optional[Any] = state_dict.pop(lowercase_ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__UpperCAmelCase : Tuple = val
else:
__UpperCAmelCase : Optional[int] = val
# load state dict
model.load_state_dict(lowercase_ )
# Check outputs on an image
__UpperCAmelCase : Optional[int] = ViTImageProcessor(size=encoder_config.image_size )
__UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained('''roberta-large''' )
__UpperCAmelCase : int = TrOCRProcessor(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = processor(images=prepare_img(lowercase_ ) , return_tensors='''pt''' ).pixel_values
# verify logits
__UpperCAmelCase : List[Any] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCAmelCase : int = model(pixel_values=lowercase_ , decoder_input_ids=lowercase_ )
__UpperCAmelCase : int = outputs.logits
__UpperCAmelCase : Optional[Any] = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCAmelCase : Optional[Any] = torch.tensor(
[-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCAmelCase : List[Any] = torch.tensor(
[-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCAmelCase : Any = torch.tensor(
[-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , lowercase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
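# Illustrative usage (an addition; the script name and output path are hypothetical):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten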
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = int(lowercase_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = t // 3600, (t // 60) % 60, t % 60
return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=300 ) -> Optional[int]:
'''simple docstring'''
return f"\n <div>\n {prefix}\n <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>\n {label}\n </div>\n "
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f" <th>{i}</th>\n"
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__UpperCAmelCase : Optional[Any] = f"{elt:.6f}" if isinstance(lowercase_ , lowercase_ ) else str(lowercase_ )
html_code += f" <td>{elt}</td>\n"
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
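# Illustrative self-check (an addition, assuming the function is callable as
# `text_to_html_table`, the name used by NotebookTrainingTracker below): the
# first row becomes the header and floats are rendered to six decimals.
def _html_table_self_check() -> None:
    html = text_to_html_table([["Step", "Loss"], [10, 0.5]])
    assert "<th>Step</th>" in html and "<td>0.500000</td>" in html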
class lowerCamelCase :
_lowerCAmelCase : Optional[Any] = 5
_lowerCAmelCase : List[Any] = 0.2
def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = 3_0_0 , ):
__UpperCAmelCase : Optional[Any] = total
__UpperCAmelCase : Tuple = '''''' if prefix is None else prefix
__UpperCAmelCase : Any = leave
__UpperCAmelCase : Any = parent
__UpperCAmelCase : List[str] = width
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Tuple = None
def A( self , lowercase__ , lowercase__ = False , lowercase__ = None):
__UpperCAmelCase : Tuple = value
if comment is not None:
__UpperCAmelCase : Optional[Any] = comment
if self.last_value is None:
__UpperCAmelCase : Any = time.time()
__UpperCAmelCase : List[str] = value
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Dict = self.warmup
__UpperCAmelCase : Optional[Any] = 1
self.update_bar(lowercase__)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
__UpperCAmelCase : int = time.time()
__UpperCAmelCase : List[str] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__UpperCAmelCase : List[Any] = self.elapsed_time / (value - self.start_value)
else:
__UpperCAmelCase : Optional[int] = None
if value >= self.total:
__UpperCAmelCase : Union[str, Any] = self.total
__UpperCAmelCase : Tuple = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__UpperCAmelCase : Tuple = self.average_time_per_item * (self.total - value)
self.update_bar(lowercase__)
__UpperCAmelCase : List[str] = value
__UpperCAmelCase : Dict = current_time
if self.average_time_per_item is None:
__UpperCAmelCase : str = 1
else:
__UpperCAmelCase : Optional[Any] = max(int(self.update_every / self.average_time_per_item) , 1)
def A( self , lowercase__ , lowercase__=None):
__UpperCAmelCase : int = ''' ''' * (len(str(self.total)) - len(str(lowercase__))) + str(lowercase__)
if self.elapsed_time is None:
__UpperCAmelCase : str = F"[{spaced_value}/{self.total} : < :"
elif self.predicted_remaining is None:
__UpperCAmelCase : List[str] = F"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
else:
__UpperCAmelCase : Dict = (
F"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
F" {format_time(self.predicted_remaining)}"
)
self.label += F", {1/self.average_time_per_item:.2f} it/s"
self.label += "]" if self.comment is None or len(self.comment) == 0 else F", {self.comment}]"
self.display()
def A( self):
__UpperCAmelCase : Tuple = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__UpperCAmelCase : List[str] = disp.display(disp.HTML(self.html_code) , display_id=lowercase__)
else:
self.output.update(disp.HTML(self.html_code))
def A( self):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''''''))
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__=None):
super().__init__(lowercase__)
__UpperCAmelCase : str = None if column_names is None else [column_names]
__UpperCAmelCase : List[Any] = None
def A( self):
__UpperCAmelCase : Dict = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__UpperCAmelCase : Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=lowercase__)
else:
self.output.update(disp.HTML(self.html_code))
def A( self , lowercase__):
if self.inner_table is None:
__UpperCAmelCase : Dict = [list(values.keys()), list(values.values())]
else:
__UpperCAmelCase : Optional[Any] = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowercase__)
__UpperCAmelCase : Dict = columns
self.inner_table.append([values[c] for c in columns])
def A( self , lowercase__ , lowercase__=None , lowercase__=3_0_0):
__UpperCAmelCase : int = NotebookProgressBar(lowercase__ , prefix=lowercase__ , parent=self , width=lowercase__)
return self.child_bar
def A( self):
__UpperCAmelCase : str = None
self.display()
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self):
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Dict = False
def A( self , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
__UpperCAmelCase : List[Any] = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Any = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''')
__UpperCAmelCase : List[str] = NotebookTrainingTracker(state.max_steps , lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
__UpperCAmelCase : Any = int(state.epoch) if int(state.epoch) == state.epoch else F"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1 , comment=F"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
__UpperCAmelCase : List[Any] = False
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , **lowercase__):
if not has_length(lowercase__):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__UpperCAmelCase : Union[str, Any] = self.training_tracker.add_child(len(lowercase__))
else:
__UpperCAmelCase : Tuple = NotebookProgressBar(len(lowercase__))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
if self.prediction_bar is not None:
self.prediction_bar.close()
__UpperCAmelCase : Union[str, Any] = None
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , **lowercase__):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__UpperCAmelCase : Union[str, Any] = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__UpperCAmelCase : List[Any] = state.global_step
self.training_tracker.write_line(lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=None , **lowercase__):
if self.training_tracker is not None:
__UpperCAmelCase : Tuple = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history):
if "loss" in log:
__UpperCAmelCase : Tuple = log['''loss''']
break
if self.first_column == "Epoch":
__UpperCAmelCase : List[str] = int(state.epoch)
else:
__UpperCAmelCase : Any = state.global_step
__UpperCAmelCase : Union[str, Any] = '''eval'''
for k in metrics:
if k.endswith('''_loss'''):
__UpperCAmelCase : int = re.sub(r'''\_loss$''' , '''''' , lowercase__)
__UpperCAmelCase : List[Any] = metrics.pop('''total_flos''' , lowercase__)
__UpperCAmelCase : List[str] = metrics.pop('''epoch''' , lowercase__)
__UpperCAmelCase : int = metrics.pop(F"{metric_key_prefix}_runtime" , lowercase__)
__UpperCAmelCase : Dict = metrics.pop(F"{metric_key_prefix}_samples_per_second" , lowercase__)
__UpperCAmelCase : Any = metrics.pop(F"{metric_key_prefix}_steps_per_second" , lowercase__)
__UpperCAmelCase : Optional[Any] = metrics.pop(F"{metric_key_prefix}_jit_compilation_time" , lowercase__)
for k, v in metrics.items():
if k == F"{metric_key_prefix}_loss":
__UpperCAmelCase : List[str] = v
else:
__UpperCAmelCase : Dict = k.split('''_''')
__UpperCAmelCase : Optional[int] = ''' '''.join([part.capitalize() for part in splits[1:]])
__UpperCAmelCase : Optional[Any] = v
self.training_tracker.write_line(lowercase__)
self.training_tracker.remove_child()
__UpperCAmelCase : List[Any] = None
# Evaluation takes a long time so we should force the next update.
__UpperCAmelCase : Any = True
def A( self , lowercase__ , lowercase__ , lowercase__ , **lowercase__):
self.training_tracker.update(
state.global_step , comment=F"Epoch {int(state.epoch)}/{state.num_train_epochs}" , force_update=lowercase__)
__UpperCAmelCase : Optional[int] = None
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
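    # by inclusion-exclusion: |pred ∪ label| = |pred| + |label| - |pred ∩ label|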
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
        # num must be negative and the element just before it must be non-negative (>= 0).
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
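# Illustrative self-check (an addition, assuming the function is callable as
# `find_negative_index`, the name used by the binary-search counter below): the
# returned index is also the number of non-negative entries in the row.
def _find_negative_index_self_check() -> None:
    assert find_negative_index([4, 3, 1, -1, -2]) == 3
    assert find_negative_index([-1, -2]) == 0
    assert find_negative_index([3, 2, 1]) == 3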
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''mra'''
def __init__( self , lowercase__=5_0_2_6_5 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1 , lowercase__=0.0_2 , lowercase__=1e-5 , lowercase__="absolute" , lowercase__=4 , lowercase__="full" , lowercase__=0 , lowercase__=0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Union[str, Any] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : int = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = position_embedding_type
__UpperCAmelCase : List[str] = block_per_row
__UpperCAmelCase : Optional[int] = approx_mode
__UpperCAmelCase : List[str] = initial_prior_first_n_blocks
__UpperCAmelCase : List[Any] = initial_prior_diagonal_n_blocks
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
        # create hypothetical next token and extend to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
        model = BioGptModel(lowercase__)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
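        # Explanatory note (added): this mirrors GPT-2-style residual scaling, where
        # output projections ("c_proj") are expected to be initialized with
        # std = initializer_range / sqrt(2 * num_hidden_layers) so that the variance
        # of the residual stream stays roughly constant as depth grows.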
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def A( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
        # Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
        model = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
        input_ids = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
        output = model(input_ids)[0]
        vocab_size = 4_2_3_8_4
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
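# Why the batching test above pads on the *left* (added note, not from the original
# test file): a decoder-only LM continues generation from the last position, so
# shorter prompts must be right-aligned. A dependency-free sketch of the resulting
# token-id / attention-mask layout:
def _left_pad(token_ids, pad_id, width):
    pad = [pad_id] * (width - len(token_ids))
    return pad + token_ids, [0] * len(pad) + [1] * len(token_ids)

assert _left_pad([5, 6, 7], pad_id=0, width=5) == ([0, 0, 5, 6, 7], [0, 0, 1, 1, 1])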
| 675 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCAmelCase = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCAmelCase = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs( word ) -> Union[str, Any]:
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Any = VOCAB_FILES_NAMES
_lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs)
        with open(vocab_file , encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''') as merges_handle:
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges))))
        self.cache = {}
@property
def A( self):
return len(self.encoder)
def A( self):
return dict(self.encoder , **self.added_tokens_encoder)
    def A( self , token):
        if token in self.cache:
            return self.cache[token]
        token = re.sub('''([.,!?()])''' , r''' \1''' , token)
        token = re.sub('''(\')''' , r''' \1 ''' , token)
        token = re.sub(r'''\s{2,}''' , ''' ''' , token)
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''')
        tokens = token.split(''' ''')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('''inf''')))
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first , i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '''@@ '''.join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def A( self , lowercase__):
        split_tokens = []
        words = re.findall(r'''\S+\n?''' , lowercase__)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens
    def A( self , lowercase__):
        token = lowercase__.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token))
def A( self , lowercase__):
return self.decoder.get(lowercase__ , self.unk_token)
def A( self , lowercase__):
        out_string = ''' '''.join(lowercase__).replace('''@@ ''' , '''''').strip()
return out_string
    def A( self , save_directory , filename_prefix = None):
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file , '''w''' , encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
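# A toy walk-through of the greedy BPE loop implemented in ``bpe`` above (added as an
# illustration; the merge table below is made up, not the real 90M vocabulary).
def _toy_bpe(word, merge_ranks):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        pairs = {(a, b) for a, b in zip(symbols, symbols[1:])}
        best = min(pairs, key=lambda pair: merge_ranks.get(pair, float("inf")))
        if best not in merge_ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols

# "l o w e r</w>" -> "lo w e r</w>" -> "low e r</w>", then no ranked pair remains.
assert _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1}) == ["low", "e", "r</w>"]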
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
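# Added illustration: the two ``dynamic_axis`` shapes the property above can return,
# written out in the form ``torch.onnx.export`` expects for its ``dynamic_axes``
# argument (plain dicts here; no live export is performed).
_default_axes = {0: '''batch''', 1: '''sequence'''}
_multiple_choice_axes = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
_dynamic_axes = OrderedDict(
    [('''input_ids''', _default_axes), ('''attention_mask''', _default_axes), ('''token_type_ids''', _default_axes)])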
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_ctrl"""] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_ctrl"""] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
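# A minimal sketch (added) of the deferred-import idea behind ``_LazyModule``: the
# heavy submodules above are only imported when one of their exported names is first
# accessed. ``_TinyLazyModule`` is a made-up illustration, not the real implementation.
import importlib

class _TinyLazyModule:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {"submodule": ["Name", ...]} into {"Name": "submodule"}
        self._name_to_module = {name: mod for mod, names in import_structure.items() for name in names}

    def __getattr__(self, attr):
        # resolve the submodule lazily, relative to the owning package
        submodule = importlib.import_module("." + self._name_to_module[attr], self._package)
        return getattr(submodule, attr)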
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( vectors , noofclusters ) -> Optional[int]:
    '''simple docstring'''
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('''float64''' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('''int32''' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('''float''' , [dim] )
        vb = tf.placeholder('''float''' , [dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
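# The same E/M iteration in plain NumPy (added for comparison with the legacy TF1
# graph above; a sketch operating on NumPy arrays, not a drop-in replacement).
import numpy as np

def _numpy_kmeans_step(vectors, centroids):
    # E-step: assign each vector to its nearest centroid (Euclidean distance).
    dists = np.linalg.norm(vectors[:, None, :] - centroids[None, :, :], axis=-1)
    assignments = dists.argmin(axis=1)
    # M-step: move each centroid to the mean of its assigned vectors (keep empty
    # clusters where they are).
    new_centroids = np.stack([
        vectors[assignments == k].mean(axis=0) if (assignments == k).any() else centroids[k]
        for k in range(len(centroids))
    ])
    return new_centroids, assignments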
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( column_title ) -> int:
    '''simple docstring'''
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
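# Worked example (added): for "AB" the loop sees 'B' first:
#   (ord("B") - 64) * 26**0 = 2, then 'A': (ord("A") - 64) * 26**1 = 26 -> answer 28.
assert __SCREAMING_SNAKE_CASE('''AB''') == 28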
if __name__ == "__main__":
from doctest import testmod
testmod()
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( nums ) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
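# Worked example (added): for [2, 7, 9, 3, 1] the best non-adjacent picks are
# 2 + 9 + 1 = 12, which the include/exclude recurrence above returns.
assert __SCREAMING_SNAKE_CASE([2, 7, 9, 3, 1]) == 12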
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCAmelCase = 3
def primitive_root( p_val ) -> int:
    '''simple docstring'''
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g
def generate_key( key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    '''simple docstring'''
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 , d , p ) , p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
return public_key, private_key
def make_key_files( name , key_size ) -> None:
'''simple docstring'''
if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ):
print('''\nWARNING:''' )
print(
f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
    public_key , private_key = generate_key(key_size )
print(f"\nWriting public key to file {name}_pubkey.txt..." )
with open(f"{name}_pubkey.txt" , '''w''' ) as fo:
fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}" )
print(f"Writing private key to file {name}_privkey.txt..." )
with open(f"{name}_privkey.txt" , '''w''' ) as fo:
fo.write(f"{private_key[0]},{private_key[1]}" )
def main( ) -> None:
'''simple docstring'''
print('''Making key files...''' )
make_key_files('''elgamal''' , 2048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
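# A minimal round-trip sketch (added; not part of the original script), using the key
# layout produced by generate_key(): public = (key_size, e_1, e_2, p) with
# e_2 = (e_1 ** d) ** -1 (mod p), private = (key_size, d). The toy numbers below are
# illustrative only; the algebra holds for any prime p and any e_1 not divisible by p.
def _elgamal_round_trip_demo() -> None:
    p, e_1, d = 467, 2, 127  # toy parameters, far too small for real use
    e_2 = pow(pow(e_1, d, p), -1, p)  # modular inverse; needs Python 3.8+
    message, r = 123, 311  # r plays the role of the per-message ephemeral key
    c_1 = pow(e_1, r, p)
    c_2 = (message * pow(e_2, r, p)) % p
    # decryption: c_2 * c_1**d = m * e_1**(-d*r) * e_1**(r*d) = m (mod p)
    assert (c_2 * pow(c_1, d, p)) % p == message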
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
        audio_classifier = pipeline(
            task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio of a dog
        dataset = load_dataset('''ashraq/esc50''')
        audio = dataset['''train''']['''audio'''][-1]['''array''']
        output = audio_classifier(audio , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [
                {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
                ],
            ]
            * 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
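# What zero-shot audio classification computes under the hood (added sketch, not the
# pipeline's actual code path): CLAP embeds the audio clip and each candidate label,
# then softmaxes the (optionally scaled) cosine similarities between them.
import numpy as np

def _zero_shot_scores(audio_emb, label_embs, logit_scale=1.0):
    # normalize so the dot products below are cosine similarities
    audio_emb = audio_emb / np.linalg.norm(audio_emb)
    label_embs = label_embs / np.linalg.norm(label_embs, axis=-1, keepdims=True)
    logits = logit_scale * label_embs @ audio_emb
    probs = np.exp(logits - logits.max())  # numerically stable softmax
    return probs / probs.sum()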
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_encoder_decoder"""] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_encoder_decoder"""] = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_encoder_decoder"""] = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
    def __init__( self , transformer , vae , scheduler , idalabel = None , ):
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler)
        # create an ImageNet label -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split(''','''):
                    self.labels[label] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def A( self , label):
        if not isinstance(label , list):
            label = list(label)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self , class_labels , guidance_scale = 4.0 , generator = None , num_inference_steps = 5_0 , output_type = "pil" , return_dict = True , ):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device).reshape(-1)
        class_null = torch.tensor([1_0_0_0] * batch_size , device=self.device)
        class_labels_input = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half] , dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == '''mps'''
                if isinstance(timesteps , float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps , rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps , uncond_eps = torch.split(eps , len(eps) // 2 , dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0)
                noise_pred = torch.cat([eps, rest] , dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output , _ = torch.split(noise_pred , latent_channels , dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input).prev_sample
        if guidance_scale > 1:
            latents , _ = latent_model_input.chunk(2 , dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
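# Classifier-free guidance, isolated from the loop above (added sketch): the batch is
# run once with the real class labels and once with the null class, and the two noise
# predictions are recombined before the scheduler step.
def _cfg_combine(cond_eps, uncond_eps, guidance_scale):
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)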
| 675 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def get_maskformer_config( model_name ) -> Tuple:
    '''simple docstring'''
    backbone_config = SwinConfig.from_pretrained(
        '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = '''huggingface/label-files'''
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = '''maskformer-ade20k-full-id2label.json'''
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = '''ade20k-id2label.json'''
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = '''maskformer-coco-stuff-id2label.json'''
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = '''coco-panoptic-id2label.json'''
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = '''cityscapes-id2label.json'''
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = '''mapillary-vistas-id2label.json'''
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    return config
def create_rename_keys( config ) -> Any:
    '''simple docstring'''
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> Tuple:
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ) -> Optional[Any]:
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ) -> Any:
    '''simple docstring'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[: hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size :, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size :]
    # fmt: on
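# Added illustration of the fused-projection split above: checkpoints store q, k and v
# stacked along dim 0 of one in_proj matrix, so three equal slices recover them.
_h = 4  # toy hidden size; real sizes come from the config
_w = torch.arange(3 * _h * _h , dtype=torch.float32).reshape(3 * _h , _h)
_q, _k, _v = _w[:_h], _w[_h : 2 * _h], _w[-_h:]
assert torch.equal(torch.cat([_q, _k, _v]) , _w)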
def prepare_img( ) -> torch.Tensor:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub = False ) -> Any:
    '''simple docstring'''
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , '''rb''' ) as f:
        data = pickle.load(f )
    state_dict = data['''model''']
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
__UpperCAmelCase : str = prepare_img()
if "vistas" in model_name:
__UpperCAmelCase : int = 65
elif "cityscapes" in model_name:
__UpperCAmelCase : Union[str, Any] = 65535
else:
__UpperCAmelCase : Optional[int] = 255
__UpperCAmelCase : int = True if '''ade''' in model_name else False
__UpperCAmelCase : List[Any] = MaskFormerImageProcessor(ignore_index=lowercase_ , reduce_labels=lowercase_ )
__UpperCAmelCase : Tuple = image_processor(lowercase_ , return_tensors='''pt''' )
__UpperCAmelCase : Any = model(**lowercase_ )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowercase_ , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f"nielsr/{model_name}" )
image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
    help="""Name of the MaskFormer model you'd like to convert""",
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
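# Hedged sketch of the colour-quantization idea the tests above exercise: ImageGPT's
# image processor maps every normalized pixel to the index of its nearest cluster
# centroid, yielding one token id per pixel. The two centroids reuse the fixture
# above; the squared-distance argmin is an assumption about the processor internals.
import numpy as np
clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)
pixels = np.random.uniform(-1.0, 1.0, size=(1_024, 3))  # a flattened 32x32 RGB image
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
input_ids = distances.argmin(axis=1)  # shape (1_024,), values in {0, 1}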
| 675 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : "DiagonalGaussianDistribution"
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
_lowerCAmelCase : Tuple = True
@register_to_config
def __init__( self , lowercase__ = 3 , lowercase__ = 3 , lowercase__ = ("DownEncoderBlock2D",) , lowercase__ = ("UpDecoderBlock2D",) , lowercase__ = (6_4,) , lowercase__ = 1 , lowercase__ = "silu" , lowercase__ = 4 , lowercase__ = 3_2 , lowercase__ = 3_2 , lowercase__ = 0.1_8_2_1_5 , ):
super().__init__()
# pass init params to Encoder
__UpperCAmelCase : Dict = Encoder(
in_channels=lowercase__ , out_channels=lowercase__ , down_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , act_fn=lowercase__ , norm_num_groups=lowercase__ , double_z=lowercase__ , )
# pass init params to Decoder
__UpperCAmelCase : Any = Decoder(
in_channels=lowercase__ , out_channels=lowercase__ , up_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , norm_num_groups=lowercase__ , act_fn=lowercase__ , )
        __UpperCAmelCase : str = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1)
        __UpperCAmelCase : Tuple = nn.Conv2d(lowercase__ , lowercase__ , 1)
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[Any] = False
# only relevant if vae tiling is enabled
__UpperCAmelCase : Optional[int] = self.config.sample_size
__UpperCAmelCase : str = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
__UpperCAmelCase : str = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
__UpperCAmelCase : List[str] = 0.2_5
def A( self , lowercase__ , lowercase__=False):
if isinstance(lowercase__ , (Encoder, Decoder)):
__UpperCAmelCase : Union[str, Any] = value
def A( self , lowercase__ = True):
__UpperCAmelCase : Optional[Any] = use_tiling
def A( self):
self.enable_tiling(lowercase__)
def A( self):
__UpperCAmelCase : int = True
def A( self):
__UpperCAmelCase : int = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A( self):
__UpperCAmelCase : List[str] = {}
def fn_recursive_add_processors(lowercase__ , lowercase__ , lowercase__):
if hasattr(lowercase__ , '''set_processor'''):
__UpperCAmelCase : Optional[int] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , lowercase__ , lowercase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowercase__ , lowercase__ , lowercase__)
return processors
def A( self , lowercase__):
__UpperCAmelCase : str = len(self.attn_processors.keys())
if isinstance(lowercase__ , lowercase__) and len(lowercase__) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(lowercase__)} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
def fn_recursive_attn_processor(lowercase__ , lowercase__ , lowercase__):
if hasattr(lowercase__ , '''set_processor'''):
if not isinstance(lowercase__ , lowercase__):
module.set_processor(lowercase__)
else:
module.set_processor(processor.pop(F"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , lowercase__ , lowercase__)
for name, module in self.named_children():
fn_recursive_attn_processor(lowercase__ , lowercase__ , lowercase__)
def A( self):
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = True):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(lowercase__ , return_dict=lowercase__)
if self.use_slicing and x.shape[0] > 1:
__UpperCAmelCase : List[Any] = [self.encoder(lowercase__) for x_slice in x.split(1)]
__UpperCAmelCase : str = torch.cat(lowercase__)
else:
__UpperCAmelCase : Optional[Any] = self.encoder(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.quant_conv(lowercase__)
__UpperCAmelCase : Union[str, Any] = DiagonalGaussianDistribution(lowercase__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase__)
def A( self , lowercase__ , lowercase__ = True):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(lowercase__ , return_dict=lowercase__)
__UpperCAmelCase : str = self.post_quant_conv(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.decoder(lowercase__)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__)
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = True):
if self.use_slicing and z.shape[0] > 1:
__UpperCAmelCase : Optional[int] = [self._decode(lowercase__).sample for z_slice in z.split(1)]
__UpperCAmelCase : Any = torch.cat(lowercase__)
else:
__UpperCAmelCase : Optional[int] = self._decode(lowercase__).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = min(a.shape[2] , b.shape[2] , lowercase__)
for y in range(lowercase__):
__UpperCAmelCase : List[str] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = min(a.shape[3] , b.shape[3] , lowercase__)
for x in range(lowercase__):
__UpperCAmelCase : Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
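    # Worked example of the cross-fade above (hedged, numbers only): with blend_extent=4,
    # a left tile of zeros and a right tile of ones, blend_h rewrites b's first columns
    # as 0 * (1 - x/4) + 1 * (x/4) for x in 0..3, i.e. 0.00, 0.25, 0.50, 0.75, so the
    # two tiles meet without a visible seam.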
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
__UpperCAmelCase : str = int(self.tile_latent_min_size * self.tile_overlap_factor)
__UpperCAmelCase : Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__UpperCAmelCase : Optional[int] = []
for i in range(0 , x.shape[2] , lowercase__):
__UpperCAmelCase : int = []
for j in range(0 , x.shape[3] , lowercase__):
__UpperCAmelCase : Any = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__UpperCAmelCase : int = self.encoder(lowercase__)
__UpperCAmelCase : List[str] = self.quant_conv(lowercase__)
row.append(lowercase__)
rows.append(lowercase__)
__UpperCAmelCase : str = []
for i, row in enumerate(lowercase__):
__UpperCAmelCase : Any = []
for j, tile in enumerate(lowercase__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__UpperCAmelCase : Optional[Any] = self.blend_v(rows[i - 1][j] , lowercase__ , lowercase__)
if j > 0:
__UpperCAmelCase : Tuple = self.blend_h(row[j - 1] , lowercase__ , lowercase__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(lowercase__ , dim=3))
__UpperCAmelCase : int = torch.cat(lowercase__ , dim=2)
__UpperCAmelCase : int = DiagonalGaussianDistribution(lowercase__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=lowercase__)
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
__UpperCAmelCase : Tuple = int(self.tile_sample_min_size * self.tile_overlap_factor)
__UpperCAmelCase : List[Any] = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__UpperCAmelCase : Union[str, Any] = []
for i in range(0 , z.shape[2] , lowercase__):
__UpperCAmelCase : str = []
for j in range(0 , z.shape[3] , lowercase__):
__UpperCAmelCase : Any = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__UpperCAmelCase : Union[str, Any] = self.post_quant_conv(lowercase__)
__UpperCAmelCase : Tuple = self.decoder(lowercase__)
row.append(lowercase__)
rows.append(lowercase__)
__UpperCAmelCase : Union[str, Any] = []
for i, row in enumerate(lowercase__):
__UpperCAmelCase : List[Any] = []
for j, tile in enumerate(lowercase__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__UpperCAmelCase : Tuple = self.blend_v(rows[i - 1][j] , lowercase__ , lowercase__)
if j > 0:
__UpperCAmelCase : List[str] = self.blend_h(row[j - 1] , lowercase__ , lowercase__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(lowercase__ , dim=3))
__UpperCAmelCase : str = torch.cat(lowercase__ , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__)
def A( self , lowercase__ , lowercase__ = False , lowercase__ = True , lowercase__ = None , ):
__UpperCAmelCase : List[str] = sample
__UpperCAmelCase : int = self.encode(lowercase__).latent_dist
if sample_posterior:
__UpperCAmelCase : Optional[int] = posterior.sample(generator=lowercase__)
else:
__UpperCAmelCase : str = posterior.mode()
__UpperCAmelCase : List[Any] = self.decode(lowercase__).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__)
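# Hedged standalone sketch of the reparameterized sampling behind
# DiagonalGaussianDistribution, which encode() above builds from the quant_conv
# output: the moments tensor carries mean and log-variance stacked along the
# channel axis, and a sample is mean + std * noise. Shapes are illustrative.
import torch
moments = torch.randn(1, 8, 4, 4)             # 2 * latent_channels from quant_conv
mean, logvar = moments.chunk(2, dim=1)
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(mean)  # differentiable w.r.t. mean and logvar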
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(lowercase_ ):
return ext
raise Exception(
f"Unable to determine file format from file extension {path}. "
f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
__UpperCAmelCase : Any = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
__UpperCAmelCase : int = PipelineDataFormat.from_str(
format=lowercase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(lowercase_ , lowercase_ )
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = nlp
__UpperCAmelCase : Any = reader
@staticmethod
def A( lowercase__):
__UpperCAmelCase : Optional[int] = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''')
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''')
run_parser.add_argument('''--input''' , type=lowercase__ , help='''Path to the file to use for inference''')
run_parser.add_argument('''--output''' , type=lowercase__ , help='''Path to the file that will be used post to write results.''')
run_parser.add_argument('''--model''' , type=lowercase__ , help='''Name or path to the model to instantiate.''')
run_parser.add_argument('''--config''' , type=lowercase__ , help='''Name or path to the model\'s config to instantiate.''')
run_parser.add_argument(
'''--tokenizer''' , type=lowercase__ , help='''Name of the tokenizer to use. (default: same as the model name)''')
run_parser.add_argument(
'''--column''' , type=lowercase__ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=lowercase__ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=lowercase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''')
run_parser.set_defaults(func=lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self._nlp, []
for entry in self._reader:
__UpperCAmelCase : Tuple = nlp(**lowercase__) if self._reader.is_multi_columns else nlp(lowercase__)
if isinstance(lowercase__ , lowercase__):
outputs.append(lowercase__)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
__UpperCAmelCase : Tuple = self._reader.save_binary(lowercase__)
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}")
else:
self._reader.save(lowercase__)
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
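# Hedged worked example of the helper above with scale_factor=8: a side length is
# divided by scale_factor**2 (rounding up when it does not divide evenly) and then
# multiplied back by scale_factor to obtain the latent resolution.
assert (512 // 8**2) * 8 == 64          # 512 divides evenly -> latent side 64
assert ((500 // 8**2) + 1) * 8 == 64    # 500 leaves a remainder -> rounded up to 64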
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
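# Standalone sketch of the classifier-free guidance step in the denoising loop above:
# the guided estimate extrapolates from the unconditional prediction toward the
# conditional one by guidance_scale. Tensor shapes are illustrative only.
import torch
noise_pred_uncond = torch.zeros(1, 4, 8, 8)
noise_pred_text = torch.ones(1, 4, 8, 8)
guidance_scale = 4.0
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert noise_pred.max().item() == 4.0  # a scale > 1 amplifies the conditional signal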
| 675 | 1 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=1024 , lowercase_=1024 , lowercase_=False , **lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(lowercase_ )
    __UpperCAmelCase : Optional[Any] = Seq2SeqDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ , type_path='''train''' , **lowercase_ )
__UpperCAmelCase : int = tok.pad_token_id
def get_lens(lowercase_ ):
__UpperCAmelCase : Tuple = tqdm(
DataLoader(lowercase_ , batch_size=512 , num_workers=8 , shuffle=lowercase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
__UpperCAmelCase : Any = []
for batch in dl:
__UpperCAmelCase : str = batch['''input_ids'''].ne(lowercase_ ).sum(1 ).tolist()
__UpperCAmelCase : List[str] = batch['''labels'''].ne(lowercase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase_ , lowercase_ ):
max_lens.append(max(lowercase_ , lowercase_ ) )
else:
max_lens.extend(lowercase_ )
return max_lens
__UpperCAmelCase : List[Any] = get_lens(lowercase_ )
    __UpperCAmelCase : Tuple = Seq2SeqDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ , type_path='''val''' , **lowercase_ )
__UpperCAmelCase : str = get_lens(lowercase_ )
pickle_save(lowercase_ , train_ds.len_file )
pickle_save(lowercase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
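# Hedged sketch of the length computation inside get_lens above: non-pad tokens are
# counted per row with ne(pad_token_id).sum(1). Values are illustrative.
import torch
pad_token_id = 0
input_ids = torch.tensor([[5, 6, 7, 0, 0], [5, 0, 0, 0, 0]])
assert input_ids.ne(pad_token_id).sum(1).tolist() == [3, 1]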
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
from __future__ import annotations
seive = [True] * 1_000_001
i = 2
while i * i <= 1_000_000:
    if seive[i]:
        for j in range(i * i, 1_000_001, i):
            seive[j] = False
    i += 1
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
return seive[n]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
return any(digit in '''02468''' for digit in str(lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ = 1000000 ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(lowercase_ ) and not contains_an_even_digit(lowercase_ ):
__UpperCAmelCase : Union[str, Any] = str(lowercase_ )
__UpperCAmelCase : List[str] = [int(str_num[j:] + str_num[:j] ) for j in range(len(lowercase_ ) )]
if all(is_prime(lowercase_ ) for i in list_nums ):
result.append(lowercase_ )
return result
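# Worked example of the rotation check above: the rotations of 197 are 197, 971 and
# 719, all prime, so 197 is counted as a circular prime.
assert [int("197"[j:] + "197"[:j]) for j in range(3)] == [197, 971, 719]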
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F'{len(find_circular_primes()) = }')
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
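# Hedged worked example of the property above: with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature extractor downsamples the
# input waveform by 5 * 2**6 = 320 samples per output frame.
assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320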
| 675 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[int] = ['''image_processor''', '''tokenizer''']
_lowerCAmelCase : Optional[int] = '''LayoutLMv3ImageProcessor'''
_lowerCAmelCase : List[str] = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self , lowercase__=None , lowercase__=None , **lowercase__):
__UpperCAmelCase : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase__ , )
__UpperCAmelCase : Tuple = kwargs.pop('''feature_extractor''')
__UpperCAmelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(lowercase__ , lowercase__)
def __call__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')
# first, apply the image processor
__UpperCAmelCase : Tuple = self.image_processor(images=lowercase__ , return_tensors=lowercase__)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : str = [text] # add batch dimension (as the image processor always adds a batch dimension)
__UpperCAmelCase : Dict = features['''words''']
__UpperCAmelCase : Any = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
# add pixel values
__UpperCAmelCase : List[str] = features.pop('''pixel_values''')
if return_overflowing_tokens is True:
__UpperCAmelCase : Union[str, Any] = self.get_overflowing_images(lowercase__ , encoded_inputs['''overflow_to_sample_mapping'''])
__UpperCAmelCase : Any = images
return encoded_inputs
def A( self , lowercase__ , lowercase__):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__UpperCAmelCase : Any = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(lowercase__) != len(lowercase__):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F" {len(lowercase__)} and {len(lowercase__)}")
return images_with_overflow
def A( self , *lowercase__ , **lowercase__):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__)
def A( self , *lowercase__ , **lowercase__):
return self.tokenizer.decode(*lowercase__ , **lowercase__)
@property
def A( self):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def A( self):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase__ , )
return self.image_processor_class
@property
def A( self):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase__ , )
return self.image_processor
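# Hedged sketch of get_overflowing_images above: when one document is split into
# several overflow windows, overflow_to_sample_mapping repeats the source index so
# each window can be paired with a copy of its page image. Values are illustrative.
images = ["page0", "page1"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 produced two windows
assert [images[idx] for idx in overflow_to_sample_mapping] == ["page0", "page0", "page1"]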
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
    __UpperCAmelCase : Tuple = MobileNetV1Config(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
    __UpperCAmelCase : Tuple = get_mobilenet_v1_config(lowercase_ )
# Load 🤗 model
    __UpperCAmelCase : int = MobileNetV1ForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
    __UpperCAmelCase : List[str] = MobileNetV1ImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
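# Hedged worked example of the label offset in the config builder above: the TF
# MobileNetV1 head predicts 1001 classes with index 0 reserved for "background",
# so the ImageNet id2label mapping is shifted up by one.
id2label = {0: "tench", 1: "goldfish"}  # toy imagenet-1k id2label
shifted = {int(k) + 1: v for k, v in id2label.items()}
shifted[0] = "background"
assert shifted == {0: "background", 1: "tench", 2: "goldfish"}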
| 675 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : List[str] = 1_3
__UpperCAmelCase : Tuple = 7
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : int = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Any = True
__UpperCAmelCase : str = 9_9
__UpperCAmelCase : List[str] = 3_8_4
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : Any = 4
__UpperCAmelCase : Tuple = 3_7
__UpperCAmelCase : str = '''gelu'''
__UpperCAmelCase : Any = 0.1
__UpperCAmelCase : Dict = 0.1
__UpperCAmelCase : Union[str, Any] = 5_1_2
__UpperCAmelCase : Tuple = 1_6
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : Dict = 0.0_2
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[Any] = 1_2_8
__UpperCAmelCase : Union[str, Any] = 2
__UpperCAmelCase : str = 9
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : int = None
def A( self):
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = None
__UpperCAmelCase : int = None
if self.use_labels:
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : int = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowercase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = TFConvBertModel(config=lowercase__)
__UpperCAmelCase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__UpperCAmelCase : Any = [input_ids, input_mask]
__UpperCAmelCase : Tuple = model(lowercase__)
__UpperCAmelCase : Optional[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Any = TFConvBertForMaskedLM(config=lowercase__)
__UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Optional[Any] = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : str = self.num_labels
__UpperCAmelCase : Dict = TFConvBertForSequenceClassification(config=lowercase__)
__UpperCAmelCase : Any = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Tuple = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = self.num_choices
__UpperCAmelCase : List[str] = TFConvBertForMultipleChoice(config=lowercase__)
__UpperCAmelCase : Tuple = tf.tile(tf.expand_dims(lowercase__ , 1) , (1, self.num_choices, 1))
__UpperCAmelCase : List[Any] = tf.tile(tf.expand_dims(lowercase__ , 1) , (1, self.num_choices, 1))
__UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(lowercase__ , 1) , (1, self.num_choices, 1))
__UpperCAmelCase : List[str] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__UpperCAmelCase : Any = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : Optional[int] = TFConvBertForTokenClassification(config=lowercase__)
__UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Any = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = TFConvBertForQuestionAnswering(config=lowercase__)
__UpperCAmelCase : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__UpperCAmelCase : Optional[Any] = model(lowercase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A( self):
__UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = config_and_inputs
__UpperCAmelCase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : int = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : List[Any] = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase : int = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Optional[Any] = False
def A( self):
__UpperCAmelCase : Union[str, Any] = TFConvBertModelTester(self)
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__)
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__)
def A( self):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__)
def A( self):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__)
@slow
def A( self):
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = True
__UpperCAmelCase : str = True
if hasattr(lowercase__ , '''use_cache'''):
__UpperCAmelCase : Any = True
__UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
__UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , '''key_length''' , lowercase__)
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = self._prepare_for_class(lowercase__ , lowercase__)
__UpperCAmelCase : List[Any] = model_class(lowercase__)
__UpperCAmelCase : List[Any] = len(model(lowercase__))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase__ , saved_model=lowercase__)
__UpperCAmelCase : Optional[int] = os.path.join(lowercase__ , '''saved_model''' , '''1''')
__UpperCAmelCase : int = tf.keras.models.load_model(lowercase__)
__UpperCAmelCase : Tuple = model(lowercase__)
if self.is_encoder_decoder:
__UpperCAmelCase : Optional[int] = outputs['''encoder_hidden_states''']
__UpperCAmelCase : Any = outputs['''encoder_attentions''']
else:
__UpperCAmelCase : Optional[int] = outputs['''hidden_states''']
__UpperCAmelCase : Optional[int] = outputs['''attentions''']
self.assertEqual(len(lowercase__) , lowercase__)
__UpperCAmelCase : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowercase__) , lowercase__)
self.assertListEqual(
list(output_hidden_states[0].shape[-2:]) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowercase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def A( self):
__UpperCAmelCase : Optional[Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
self.assertIsNotNone(lowercase__)
def A( self):
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Optional[Any] = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length)
__UpperCAmelCase : int = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length)
__UpperCAmelCase : Optional[int] = getattr(self.model_tester , '''key_length''' , lowercase__)
__UpperCAmelCase : int = getattr(self.model_tester , '''key_length''' , lowercase__)
def check_decoder_attentions_output(lowercase__):
__UpperCAmelCase : Dict = len(lowercase__)
self.assertEqual(out_len % 2 , 0)
__UpperCAmelCase : Tuple = outputs.decoder_attentions
self.assertEqual(len(lowercase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase__):
__UpperCAmelCase : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : int = False
__UpperCAmelCase : List[str] = model_class(lowercase__)
__UpperCAmelCase : Tuple = model(self._prepare_for_class(lowercase__ , lowercase__))
__UpperCAmelCase : Any = len(lowercase__)
self.assertEqual(config.output_hidden_states , lowercase__)
check_encoder_attentions_output(lowercase__)
if self.is_encoder_decoder:
__UpperCAmelCase : str = model_class(lowercase__)
__UpperCAmelCase : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__))
self.assertEqual(config.output_hidden_states , lowercase__)
check_decoder_attentions_output(lowercase__)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : str = model_class(lowercase__)
__UpperCAmelCase : List[str] = model(self._prepare_for_class(lowercase__ , lowercase__))
self.assertEqual(config.output_hidden_states , lowercase__)
check_encoder_attentions_output(lowercase__)
# Check attention is always last and order is fine
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
__UpperCAmelCase : Dict = model(self._prepare_for_class(lowercase__ , lowercase__))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__))
self.assertEqual(model.config.output_hidden_states , lowercase__)
check_encoder_attentions_output(lowercase__)
@require_tf
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''')
__UpperCAmelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]])
__UpperCAmelCase : str = model(lowercase__)[0]
__UpperCAmelCase : Tuple = [1, 6, 7_6_8]
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Union[str, Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
])
tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1e-4)
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = KandinskyInpaintPipeline
_lowerCAmelCase : List[Any] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_lowerCAmelCase : Any = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_lowerCAmelCase : str = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowerCAmelCase : Union[str, Any] = False
@property
def A( self):
return 3_2
@property
def A( self):
return 3_2
@property
def A( self):
return self.time_input_dim
@property
def A( self):
return self.time_input_dim * 4
@property
def A( self):
return 1_0_0
@property
def A( self):
__UpperCAmelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''')
return tokenizer
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__UpperCAmelCase : Dict = MultilingualCLIP(lowercase__)
__UpperCAmelCase : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : Union[str, Any] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__UpperCAmelCase : Optional[int] = UNetaDConditionModel(**lowercase__)
return model
@property
def A( self):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A( self):
torch.manual_seed(0)
__UpperCAmelCase : List[str] = VQModel(**self.dummy_movq_kwargs)
return model
def A( self):
__UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
__UpperCAmelCase : List[Any] = self.dummy_tokenizer
__UpperCAmelCase : str = self.dummy_unet
__UpperCAmelCase : Optional[Any] = self.dummy_movq
__UpperCAmelCase : Optional[int] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=lowercase__ , )
__UpperCAmelCase : Tuple = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A( self , lowercase__ , lowercase__=0):
__UpperCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase__)).to(lowercase__)
__UpperCAmelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1)).to(lowercase__)
# create init_image
__UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(lowercase__)).to(lowercase__)
__UpperCAmelCase : Any = image.cpu().permute(0 , 2 , 3 , 1)[0]
__UpperCAmelCase : int = Image.fromarray(np.uinta(lowercase__)).convert('''RGB''').resize((2_5_6, 2_5_6))
# create mask
__UpperCAmelCase : Dict = np.ones((6_4, 6_4) , dtype=np.floataa)
__UpperCAmelCase : List[str] = 0
if str(lowercase__).startswith('''mps'''):
__UpperCAmelCase : Dict = torch.manual_seed(lowercase__)
else:
__UpperCAmelCase : List[Any] = torch.Generator(device=lowercase__).manual_seed(lowercase__)
__UpperCAmelCase : str = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def A( self):
__UpperCAmelCase : str = '''cpu'''
__UpperCAmelCase : Optional[int] = self.get_dummy_components()
__UpperCAmelCase : Optional[Any] = self.pipeline_class(**lowercase__)
__UpperCAmelCase : Union[str, Any] = pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Dict = pipe(**self.get_dummy_inputs(lowercase__))
__UpperCAmelCase : Optional[int] = output.images
__UpperCAmelCase : List[Any] = pipe(
**self.get_dummy_inputs(lowercase__) , return_dict=lowercase__ , )[0]
__UpperCAmelCase : str = image[0, -3:, -3:, -1]
__UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}")
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCAmelCase : int = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def A( self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def A( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A( self):
__UpperCAmelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''')
__UpperCAmelCase : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''')
__UpperCAmelCase : List[Any] = np.ones((7_6_8, 7_6_8) , dtype=np.floataa)
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : Optional[int] = '''a hat'''
__UpperCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa)
pipe_prior.to(lowercase__)
__UpperCAmelCase : Any = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa)
__UpperCAmelCase : int = pipeline.to(lowercase__)
pipeline.set_progress_bar_config(disable=lowercase__)
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''').manual_seed(0)
        __UpperCAmelCase , __UpperCAmelCase = pipe_prior(
lowercase__ , generator=lowercase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__UpperCAmelCase : List[str] = pipeline(
lowercase__ , image=lowercase__ , mask_image=lowercase__ , image_embeds=lowercase__ , negative_image_embeds=lowercase__ , generator=lowercase__ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
__UpperCAmelCase : str = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__)
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
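# Example: a = 25 (0b11001) and b = 32 (0b100000) give "0b111001", the bitwise OR of the two inputs.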
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def __SCREAMING_SNAKE_CASE ( lowercase_ = "laptop" ) -> DataFrame:
'''simple docstring'''
__UpperCAmelCase : str = f"https://www.amazon.in/laptop/s?k={product}"
__UpperCAmelCase : Tuple = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
__UpperCAmelCase : Tuple = BeautifulSoup(requests.get(lowercase_ , headers=lowercase_ ).text )
# Initialize a Pandas dataframe with the column titles
__UpperCAmelCase : Tuple = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
__UpperCAmelCase : int = item.ha.text
__UpperCAmelCase : int = '''https://www.amazon.in/''' + item.ha.a['''href''']
__UpperCAmelCase : List[Any] = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
__UpperCAmelCase : Any = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
__UpperCAmelCase : Optional[int] = '''Not available'''
try:
__UpperCAmelCase : Any = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
__UpperCAmelCase : str = ''''''
try:
__UpperCAmelCase : Optional[Any] = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
__UpperCAmelCase : str = float('''nan''' )
except AttributeError:
pass
__UpperCAmelCase : int = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__UpperCAmelCase : Any = ''' '''
__UpperCAmelCase : Dict = ''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCAmelCase = """headphones"""
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
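    # Cycle through the key's own characters until the generated key matches the message length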
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
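    # Per-class pixel counts via histograms over label indices: intersection, prediction and ground-truth areas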
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCamelCase :
def __init__( self , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : str = 1_3
__UpperCAmelCase : Union[str, Any] = 7
__UpperCAmelCase : str = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : int = True
__UpperCAmelCase : Dict = 9_9
__UpperCAmelCase : str = 3_2
__UpperCAmelCase : List[str] = 2
__UpperCAmelCase : Optional[Any] = 4
__UpperCAmelCase : Union[str, Any] = 3_7
__UpperCAmelCase : List[Any] = '''gelu'''
__UpperCAmelCase : List[Any] = 0.1
__UpperCAmelCase : int = 0.1
__UpperCAmelCase : Union[str, Any] = 5_1_2
__UpperCAmelCase : str = 1_6
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : Optional[Any] = 0.0_2
__UpperCAmelCase : List[str] = 3
__UpperCAmelCase : Any = 4
__UpperCAmelCase : List[Any] = None
def A( self):
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Any = None
if self.use_input_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[int] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = TFEsmModel(config=lowercase__)
__UpperCAmelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase : List[Any] = model(lowercase__)
__UpperCAmelCase : Tuple = [input_ids, input_mask]
__UpperCAmelCase : Optional[int] = model(lowercase__)
__UpperCAmelCase : Optional[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Dict = TFEsmModel(config=lowercase__)
__UpperCAmelCase : Tuple = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''encoder_hidden_states''': encoder_hidden_states,
'''encoder_attention_mask''': encoder_attention_mask,
}
__UpperCAmelCase : Dict = model(lowercase__)
__UpperCAmelCase : Any = [input_ids, input_mask]
__UpperCAmelCase : Optional[Any] = model(lowercase__ , encoder_hidden_states=lowercase__)
# Also check the case where encoder outputs are not passed
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = TFEsmForMaskedLM(config=lowercase__)
__UpperCAmelCase : Optional[int] = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Any = self.num_labels
__UpperCAmelCase : Union[str, Any] = TFEsmForTokenClassification(config=lowercase__)
__UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
__UpperCAmelCase : List[str] = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : int = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase : List[str] = False
_lowerCAmelCase : Dict = False
def A( self):
__UpperCAmelCase : Optional[Any] = TFEsmModelTester(self)
__UpperCAmelCase : List[str] = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__)
@slow
def A( self):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = TFEsmModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
@unittest.skip('''Protein models do not support embedding resizing.''')
def A( self):
pass
@unittest.skip('''Protein models do not support embedding resizing.''')
def A( self):
pass
def A( self):
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(lowercase__)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
__UpperCAmelCase : Any = model.get_bias()
assert isinstance(lowercase__ , lowercase__)
for k, v in name.items():
assert isinstance(lowercase__ , tf.Variable)
else:
__UpperCAmelCase : int = model.get_output_embeddings()
assert x is None
__UpperCAmelCase : List[Any] = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Tuple = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
__UpperCAmelCase : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]])
__UpperCAmelCase : Optional[Any] = model(lowercase__)[0]
__UpperCAmelCase : Optional[Any] = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape) , lowercase__)
# compare the actual values for a slice.
__UpperCAmelCase : int = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2))
@slow
def A( self):
__UpperCAmelCase : int = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
__UpperCAmelCase : Tuple = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
__UpperCAmelCase : Any = model(lowercase__)[0]
# compare the actual values for a slice.
__UpperCAmelCase : List[str] = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
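    # Slide a window of length p_len across the text, reusing the previous window's hash at each step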
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Rolling hash update (https://en.wikipedia.org/wiki/Rolling_hash): drop the leading character's contribution, shift, and append the next character
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
import sys
lowerCAmelCase = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 1
for digit in s:
product *= int(lowercase_ )
return product
def __SCREAMING_SNAKE_CASE ( lowercase_ = N ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = -sys.maxsize - 1
__UpperCAmelCase : Optional[Any] = n[:13]
__UpperCAmelCase : List[str] = 13
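    # Slide a 13-digit window across the series, evaluating candidate products as the window moves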
while cur_index < len(lowercase_ ) - 13:
if int(n[cur_index] ) >= int(substr[0] ):
__UpperCAmelCase : Tuple = substr[1:] + n[cur_index]
cur_index += 1
else:
__UpperCAmelCase : int = max(lowercase_ , str_eval(lowercase_ ) )
__UpperCAmelCase : List[Any] = n[cur_index : cur_index + 13]
cur_index += 13
return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        raise ValueError('''a should be a positive number''')
__UpperCAmelCase : Any = [1]
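    # i, j and k index the next list elements still to be multiplied by 2, 3 and 5 respectively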
    __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(lowercase_ , lowercase_ ):
raise ValueError('''Length must be a positive integer.''' )
return [n * (2 * n - 1) for n in range(lowercase_ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
lowerCAmelCase = 10
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
for i in range(lowercase_ , lowercase_ ):
if array[i] == target:
return i
return -1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : str = len(lowercase_ )
while left <= right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
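        # Probe two interior points that split [left, right] into three roughly equal parts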
__UpperCAmelCase : List[Any] = (left + right) // 3 + 1
__UpperCAmelCase : Dict = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
__UpperCAmelCase : Any = one_third - 1
elif array[two_third] < target:
__UpperCAmelCase : Any = two_third + 1
else:
__UpperCAmelCase : Union[str, Any] = one_third + 1
__UpperCAmelCase : Any = two_third - 1
else:
return -1
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Union[str, Any] = (left + right) // 3 + 1
__UpperCAmelCase : Tuple = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase_ , one_third - 1 , lowercase_ , lowercase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowercase_ , lowercase_ , lowercase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase_ , lowercase_ )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
lowerCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
lowerCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
lowerCAmelCase = ite_ternary_search(collection, target)
lowerCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'Iterative search: {target} found at positions: {resulta}')
print(F'Recursive search: {target} found at positions: {resulta}')
else:
print("""Not found""")
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
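    # Tests without an explicit integration/unit marker default to the unit suite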
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
__UpperCAmelCase : Optional[int] = 6
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Union[str, Any] = 1901
__UpperCAmelCase : Dict = 0
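    # 1 Jan 1901 was a Tuesday, so the first Sunday of the century falls on day 6; advance a week at a time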
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
__UpperCAmelCase : int = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
__UpperCAmelCase : Dict = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
__UpperCAmelCase : Dict = day - days_per_month[month - 2]
if month > 12:
year += 1
__UpperCAmelCase : str = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
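    # Binary search for the index of the first negative value in the (descending) row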
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
lowerCAmelCase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    '''simple docstring'''
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(i )
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1 , num2 )
            operand_stack.push(total )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCAmelCase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F'{equation} = {dijkstras_two_stack_algorithm(equation)}')
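    # Worked trace (a sketch, not part of the original): for the equation above
    # the ")" handler fires four times -- first (4 * 2) -> 8, then (2 + 3) -> 5,
    # then (8 * 5) -> 40, and finally (5 + 40) -> 45, which is the printed result.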
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
        model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''' , return_tensors='''pt''').input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''pt''').input_ids
        loss = model(input_ids.to(torch_device) , labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -8_4.9_1_2_7
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
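        # Why multiply by the sequence length (a note, not in the original test):
        # the model returns the *mean* cross-entropy per target token, so
        # labels.shape[-1] * loss.item() is the summed negative log-likelihood
        # of the target sequence; negating it gives a log-likelihood score that
        # can be compared against the reference value above.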
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
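# Minimal usage sketch mirroring the integration tests above (an illustration,
# not part of the test suite; model and tokenizer names are taken from the
# tests themselves):
# tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
# model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
# inputs = tokenizer("COVID-19 is", return_tensors="pt")
# output_ids = model.generate(**inputs, min_length=100, max_length=1024, num_beams=5, early_stopping=True)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))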
| 675 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
    model_name_or_path: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
                ''' scratch.'''
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(MODEL_TYPES)} , )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class lowerCamelCase :
    train_data_file: Optional[str] = field(
        default=None , metadata={'''help''': '''The input training data file (a text file).'''} )
    train_data_files: Optional[str] = field(
        default=None , metadata={
            '''help''': (
                '''The input training data files (multiple files in glob format). '''
                '''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
            )
        } , )
    eval_data_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
    train_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
    eval_ref_file: Optional[str] = field(
        default=None , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
    line_by_line: bool = field(
        default=False , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
    mlm: bool = field(
        default=False , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
    whole_word_mask: bool = field(default=False , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
    mlm_probability: float = field(
        default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
    plm_probability: float = field(
        default=1 / 6 , metadata={
            '''help''': (
                '''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
                ''' modeling.'''
            )
        } , )
    max_span_length: int = field(
        default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
    block_size: int = field(
        default=-1 , metadata={
            '''help''': (
                '''Optional input sequence length after tokenization.'''
                '''The training dataset will be truncated in block of this size for training.'''
                '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def get_dataset( args , tokenizer , evaluate = False , cache_dir = None , ) -> Any:
    '''simple docstring'''
    def _dataset(file_path , ref_path=None ):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )
    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        raise ValueError(
            '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
            ''' script, save it,and load it from here, using --tokenizer_name''' )
    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
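# Hypothetical invocation (a sketch, not from the original script; the flags
# map onto the dataclass fields defined above):
#
#   python run_language_modeling.py \
#       --model_name_or_path gpt2 \
#       --train_data_file train.txt \
#       --do_train \
#       --output_dir ./lm-output
#
# Add --mlm (with e.g. --model_name_or_path bert-base-uncased) for masked-LM
# training, since BERT-like models are rejected without it above.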
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
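# Usage sketch (an illustration, not part of this module; upstream these
# classes are exported as BertConfig and BertOnnxConfig):
# config = BertConfig(hidden_size=5_1_2, num_hidden_layers=4, num_attention_heads=8)
# onnx_config = BertOnnxConfig(config)
# print(onnx_config.inputs)  # dynamic axes for input_ids / attention_mask / token_type_ids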
| 675 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""Helsinki-NLP/opus-mt-en-de""": """https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json""",
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''marian'''
_lowerCAmelCase : Optional[Any] = ['''past_key_values''']
_lowerCAmelCase : List[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=5_8_1_0_1 , decoder_vocab_size=None , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , decoder_start_token_id=5_8_1_0_0 , scale_embedding=False , pad_token_id=5_8_1_0_0 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class lowerCamelCase ( _UpperCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A( self):
if self.task in ["default", "seq2seq-lm"]:
__UpperCAmelCase : Optional[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
__UpperCAmelCase : int = {0: '''batch'''}
__UpperCAmelCase : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__UpperCAmelCase : Any = {0: '''batch''', 1: '''decoder_sequence'''}
__UpperCAmelCase : Union[str, Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction='''inputs''')
elif self.task == "causal-lm":
# TODO: figure this case out.
__UpperCAmelCase : List[Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
])
if self.use_past:
__UpperCAmelCase , __UpperCAmelCase : Any = self.num_layers
for i in range(lowercase__):
__UpperCAmelCase : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__UpperCAmelCase : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
])
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A( self):
if self.task in ["default", "seq2seq-lm"]:
__UpperCAmelCase : Optional[int] = super().outputs
else:
__UpperCAmelCase : List[str] = super(lowercase__ , self).outputs
if self.use_past:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.num_layers
for i in range(lowercase__):
__UpperCAmelCase : int = {0: '''batch''', 2: '''past_sequence + sequence'''}
__UpperCAmelCase : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def A( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
__UpperCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__)
# Generate decoder inputs
__UpperCAmelCase : List[str] = seq_length if not self.use_past else 1
__UpperCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__)
__UpperCAmelCase : List[str] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__UpperCAmelCase : List[str] = dict(**lowercase__ , **lowercase__)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = common_inputs['''input_ids'''].shape
__UpperCAmelCase : Optional[Any] = common_inputs['''decoder_input_ids'''].shape[1]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.num_attention_heads
__UpperCAmelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCAmelCase : List[Any] = decoder_seq_length + 3
__UpperCAmelCase : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__UpperCAmelCase : int = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ , lowercase__)] , dim=1)
__UpperCAmelCase : int = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.num_layers
__UpperCAmelCase : Optional[int] = min(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[Any] = max(lowercase__ , lowercase__) - min_num_layers
__UpperCAmelCase : Any = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__),
torch.zeros(lowercase__),
torch.zeros(lowercase__),
torch.zeros(lowercase__),
))
# TODO: test this.
__UpperCAmelCase : Optional[int] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ , lowercase__):
common_inputs["past_key_values"].append((torch.zeros(lowercase__), torch.zeros(lowercase__)))
return common_inputs
def A( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
__UpperCAmelCase : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__)
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''')
else:
import torch
__UpperCAmelCase , __UpperCAmelCase : int = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCAmelCase : Any = seqlen + 2
__UpperCAmelCase , __UpperCAmelCase : Dict = self.num_layers
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.num_attention_heads
__UpperCAmelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__UpperCAmelCase : Dict = common_inputs['''attention_mask'''].dtype
__UpperCAmelCase : List[Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ , lowercase__ , dtype=lowercase__)] , dim=1)
__UpperCAmelCase : List[str] = [
(torch.zeros(lowercase__), torch.zeros(lowercase__)) for _ in range(lowercase__)
]
return common_inputs
def A( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__UpperCAmelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(lowercase__)
__UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowercase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase__)
# Generate dummy inputs according to compute batch and sequence
__UpperCAmelCase : Dict = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
__UpperCAmelCase : Any = dict(tokenizer(lowercase__ , return_tensors=lowercase__))
return common_inputs
def A( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ):
if self.task in ["default", "seq2seq-lm"]:
__UpperCAmelCase : int = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__)
else:
__UpperCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__)
return common_inputs
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if self.task in ["default", "seq2seq-lm"]:
__UpperCAmelCase : Any = super()._flatten_past_key_values_(lowercase__ , lowercase__ , lowercase__ , lowercase__)
else:
__UpperCAmelCase : str = super(lowercase__ , self)._flatten_past_key_values_(
lowercase__ , lowercase__ , lowercase__ , lowercase__)
@property
def A( self):
return 1e-4
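# Note on the past-key-values path above (an explanatory sketch, not part of
# the module): once `use_past` is enabled, only the newly generated token is
# fed to the decoder on each step, which is why the dummy decoder sequence
# length collapses to 1 and the cached key/value tensors carry the earlier
# positions instead.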
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster( vectors , noofclusters ) -> Optional[int]:
    '''simple docstring'''
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder('''float64''' , [dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid , centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder('''int32''' )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment , assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('''float''' , [None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input , 0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('''float''' , [dim] )
        vb = tf.placeholder('''float''' , [dim] )
        # tf.sub was renamed to tf.subtract in TensorFlow 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va , vb ) , 2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('''float''' , [noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist , feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment , feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op , feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
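# Example usage (a sketch, not part of the original; requires the TF1-style
# session API used above, e.g. via tf.compat.v1 on TensorFlow 2):
# points = [[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [8.5, 9.0]]
# centroids, assignments = TFKMeansCluster(points, 2)
# print(centroids)    # one centroid vector per cluster
# print(assignments)  # cluster index (0 or 1) for each input point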
| 675 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
lowerCAmelCase = TypeVar("""T""")
class DisjointSetTreeNode ( Generic[T] ):
    def __init__( self , data):
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree ( Generic[T] ):
    def __init__( self):
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}
    def make_set( self , data):
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)
    def find_set( self , data):
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent
    def link( self , nodea , nodeb):
        # helper function for union operation
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
    def union( self , dataa , datab):
        # merge 2 disjoint sets
        self.link(self.find_set(dataa) , self.find_set(datab))
class GraphUndirectedWeighted ( Generic[T] ):
    def __init__( self):
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}
    def add_node( self , node):
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self , nodea , nodeb , weight):
        # add an edge with the given weight
        self.add_node(nodea)
        self.add_node(nodeb)
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
    def kruskal( self):
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w)
                disjoint_set.union(u , v)
        return graph
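# Example usage (a sketch, not from the original module; method names follow
# the definitions above):
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1, 2, 1)
# g.add_edge(2, 3, 2)
# g.add_edge(1, 3, 10)
# mst = g.kruskal()
# print(mst.connections)  # the weight-10 edge is left out of the spanning tree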
| 675 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    # max_including: best sum that includes the previous element;
    # max_excluding: best sum that excludes it.
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
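    # Worked examples (a sketch, not in the original):
    # maximum_non_adjacent_sum([1, 2, 3]) == 4                (1 + 3)
    # maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18   (5 + 7 + 6)
    # maximum_non_adjacent_sum([-1, -5, -3]) == 0             (take nothing)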
| 675 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCAmelCase = get_logger(__name__)
lowerCAmelCase = R"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class lowerCamelCase :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__( self , input_ids , scores):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class lowerCamelCase :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__( self , input_ids , scores):
raise NotImplementedError(
F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class lowerCamelCase ( _UpperCamelCase ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__( self , input_ids , scores , cur_len , **kwargs):
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        F"Make sure that all the required parameters: {list(function_args.keys())} for "
                        F"{processor.__class__} are passed to the logits processor.")
                scores = processor(input_ids , scores , cur_len , **kwargs)
            else:
                scores = processor(input_ids , scores , cur_len)
        return scores
class lowerCamelCase ( _UpperCamelCase ):
    def __init__( self , temperature):
        if not isinstance(temperature , float) or not (temperature > 0):
            raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature
    def __call__( self , input_ids , scores , cur_len):
        scores = scores / self.temperature
        return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = -float('''Inf''') , lowercase__ = 1):
if not isinstance(lowercase__ , lowercase__) or (top_p < 0 or top_p > 1.0):
raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}")
if not isinstance(lowercase__ , lowercase__) or (min_tokens_to_keep < 1):
raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
__UpperCAmelCase : str = top_p
__UpperCAmelCase : Optional[int] = filter_value
__UpperCAmelCase : Optional[int] = min_tokens_to_keep
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase , __UpperCAmelCase : Dict = lax.top_k(lowercase__ , scores.shape[-1])
__UpperCAmelCase : Optional[int] = jnp.full_like(lowercase__ , self.filter_value)
__UpperCAmelCase : Union[str, Any] = jax.nn.softmax(lowercase__ , axis=-1).cumsum(axis=-1)
__UpperCAmelCase : Tuple = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
__UpperCAmelCase : List[str] = jnp.roll(lowercase__ , 1)
score_mask |= score_mask.at[:, 0].set(lowercase__)
# min tokens to keep
__UpperCAmelCase : List[Any] = score_mask.at[:, : self.min_tokens_to_keep].set(lowercase__)
__UpperCAmelCase : Tuple = jnp.where(lowercase__ , lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = jax.lax.sort_key_val(lowercase__ , lowercase__)[-1]
return next_scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = -float('''Inf''') , lowercase__ = 1):
if not isinstance(lowercase__ , lowercase__) or top_k <= 0:
raise ValueError(F"`top_k` has to be a strictly positive integer, but is {top_k}")
__UpperCAmelCase : List[str] = max(lowercase__ , lowercase__)
__UpperCAmelCase : Union[str, Any] = filter_value
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase , __UpperCAmelCase : Dict = scores.shape
__UpperCAmelCase : Optional[Any] = jnp.full(batch_size * vocab_size , self.filter_value)
__UpperCAmelCase : int = min(self.top_k , scores.shape[-1]) # Safety check
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = lax.top_k(lowercase__ , lowercase__)
__UpperCAmelCase : List[Any] = jnp.broadcast_to((jnp.arange(lowercase__) * vocab_size)[:, None] , (batch_size, topk)).flatten()
__UpperCAmelCase : Optional[Any] = topk_scores.flatten()
__UpperCAmelCase : str = topk_indices.flatten() + shift
__UpperCAmelCase : int = next_scores_flat.at[topk_indices_flat].set(lowercase__)
__UpperCAmelCase : List[Any] = next_scores_flat.reshape(lowercase__ , lowercase__)
return next_scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
__UpperCAmelCase : Dict = bos_token_id
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = jnp.full(scores.shape , -float('''inf'''))
__UpperCAmelCase : List[str] = 1 - jnp.bool_(cur_len - 1)
__UpperCAmelCase : Dict = jnp.where(lowercase__ , new_scores.at[:, self.bos_token_id].set(0) , lowercase__)
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = max_length
__UpperCAmelCase : str = eos_token_id
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = jnp.full(scores.shape , -float('''inf'''))
__UpperCAmelCase : int = 1 - jnp.bool_(cur_len - self.max_length + 1)
__UpperCAmelCase : Dict = jnp.where(lowercase__ , new_scores.at[:, self.eos_token_id].set(0) , lowercase__)
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__):
if not isinstance(lowercase__ , lowercase__) or min_length < 0:
raise ValueError(F"`min_length` has to be a positive integer, but is {min_length}")
if not isinstance(lowercase__ , lowercase__) or eos_token_id < 0:
raise ValueError(F"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
__UpperCAmelCase : List[Any] = min_length
__UpperCAmelCase : Optional[int] = eos_token_id
def __call__( self , lowercase__ , lowercase__ , lowercase__):
# create boolean flag to decide if min length penalty should be applied
__UpperCAmelCase : List[str] = 1 - jnp.clip(cur_len - self.min_length , 0 , 1)
__UpperCAmelCase : List[str] = jnp.where(lowercase__ , scores.at[:, self.eos_token_id].set(-float('''inf''')) , lowercase__)
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__):
__UpperCAmelCase : int = list(lowercase__)
__UpperCAmelCase : str = begin_index
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = 1 - jnp.bool_(cur_len - self.begin_index)
__UpperCAmelCase : Optional[Any] = jnp.where(lowercase__ , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''')) , lowercase__)
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = scores.at[..., self.suppress_tokens].set(-float('''inf'''))
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__):
__UpperCAmelCase : Any = dict(lowercase__)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__UpperCAmelCase : List[str] = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1
for index, token in force_token_map.items():
if token is not None:
__UpperCAmelCase : Optional[Any] = force_token_array.at[index].set(lowercase__)
__UpperCAmelCase : Optional[int] = jnp.intaa(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__):
def _force_token(lowercase__):
__UpperCAmelCase : Optional[Any] = scores.shape[0]
__UpperCAmelCase : Dict = self.force_token_array[generation_idx]
__UpperCAmelCase : Union[str, Any] = jnp.ones_like(lowercase__ , dtype=scores.dtype) * -float('''inf''')
__UpperCAmelCase : Tuple = jnp.zeros((batch_size, 1) , dtype=scores.dtype)
__UpperCAmelCase : Any = lax.dynamic_update_slice(lowercase__ , lowercase__ , (0, current_token))
return new_scores
__UpperCAmelCase : Tuple = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(lowercase__) , lambda: scores , ) , )
return scores
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = generate_config.eos_token_id
__UpperCAmelCase : str = generate_config.no_timestamps_token_id
__UpperCAmelCase : Optional[int] = generate_config.no_timestamps_token_id + 1
__UpperCAmelCase : List[Any] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowercase__ , '''max_initial_timestamp_index'''):
__UpperCAmelCase : Optional[int] = generate_config.max_initial_timestamp_index
else:
__UpperCAmelCase : Optional[Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__UpperCAmelCase : Dict = model_config.vocab_size
def __call__( self , lowercase__ , lowercase__ , lowercase__):
# suppress <|notimestamps|> which is handled by without_timestamps
__UpperCAmelCase : Union[str, Any] = scores.at[:, self.no_timestamps_token_id].set(-float('''inf'''))
def handle_pairs(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = jnp.where((cur_len - self.begin_index) >= 1 , lowercase__ , lowercase__)
__UpperCAmelCase : int = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , lowercase__ , )
__UpperCAmelCase : Dict = jnp.where((cur_len - self.begin_index) < 2 , lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , lowercase__ , lowercase__ , )
return jnp.where(
lowercase__ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''')) , scores_k.at[: self.eos_token_id].set(-float('''inf''')) , ) , lowercase__ , )
__UpperCAmelCase : Any = jax.vmap(lowercase__)(lowercase__ , lowercase__)
__UpperCAmelCase : Optional[int] = jnp.where(cur_len == self.begin_index , lowercase__ , lowercase__)
__UpperCAmelCase : str = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , lowercase__ , )
__UpperCAmelCase : List[Any] = self.timestamp_begin + self.max_initial_timestamp_index
__UpperCAmelCase : Union[str, Any] = jnp.where(
lowercase__ , scores.at[:, last_allowed + 1 :].set(-float('''inf''')) , lowercase__ , )
# if sum of probability over timestamps is above any other token, sample timestamp
__UpperCAmelCase : Dict = jax.nn.log_softmax(lowercase__ , axis=-1)
def handle_cumulative_probs(lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1)
__UpperCAmelCase : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''')) , lowercase__ , )
__UpperCAmelCase : int = jax.vmap(lowercase__)(lowercase__ , lowercase__)
return scores
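# Hypothetical usage sketch. The mangled class names above all collide, so this
# uses the public names these processors appear to correspond to in the
# transformers library (an assumption about the de-obfuscated file):
#
#   import jax.numpy as jnp
#   from transformers import (
#       FlaxLogitsProcessorList,
#       FlaxTemperatureLogitsWarper,
#       FlaxTopKLogitsWarper,
#   )
#
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#   )
#   input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
#   scores = jnp.ones((1, 100))
#   filtered = processors(input_ids, scores, cur_len=4)  # scaled, then top-k masked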
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
        __UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
            nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vacuum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
        __UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
] , )
        __UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
            [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vacuum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
                    {'''score''': 0.0_0_1, '''label''': '''Sound of vacuum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = UniSpeechSatForSequenceClassification.from_pretrained(lowercase_ , config=lowercase_ )
__UpperCAmelCase : Dict = downstream_dict['''projector.weight''']
__UpperCAmelCase : List[str] = downstream_dict['''projector.bias''']
__UpperCAmelCase : Any = downstream_dict['''model.post_net.linear.weight''']
__UpperCAmelCase : Union[str, Any] = downstream_dict['''model.post_net.linear.bias''']
return model
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(lowercase_ , config=lowercase_ )
__UpperCAmelCase : Optional[Any] = downstream_dict['''model.linear.weight''']
__UpperCAmelCase : Optional[int] = downstream_dict['''model.linear.bias''']
return model
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = UniSpeechSatForXVector.from_pretrained(lowercase_ , config=lowercase_ )
__UpperCAmelCase : Tuple = downstream_dict['''connector.weight''']
__UpperCAmelCase : List[str] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__UpperCAmelCase : Dict = downstream_dict[
f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
]
__UpperCAmelCase : Optional[int] = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
__UpperCAmelCase : Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
__UpperCAmelCase : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
__UpperCAmelCase : Any = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
__UpperCAmelCase : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
__UpperCAmelCase : int = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = torch.load(lowercase_ , map_location='''cpu''' )
__UpperCAmelCase : List[Any] = checkpoint['''Downstream''']
__UpperCAmelCase : int = UniSpeechSatConfig.from_pretrained(lowercase_ )
__UpperCAmelCase : str = WavaVecaFeatureExtractor.from_pretrained(
lowercase_ , return_attention_mask=lowercase_ , do_normalize=lowercase_ )
__UpperCAmelCase : str = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
__UpperCAmelCase : Optional[Any] = convert_classification(lowercase_ , lowercase_ , lowercase_ )
elif arch.endswith('''ForAudioFrameClassification''' ):
__UpperCAmelCase : Tuple = convert_diarization(lowercase_ , lowercase_ , lowercase_ )
elif arch.endswith('''ForXVector''' ):
__UpperCAmelCase : Dict = convert_xvector(lowercase_ , lowercase_ , lowercase_ )
else:
raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
if hf_config.use_weighted_layer_sum:
__UpperCAmelCase : int = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(lowercase_ )
hf_model.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
lowerCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
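# Hypothetical command-line invocation; the flags mirror the argparse
# definitions above, while the script name, model name and paths are
# placeholders, not values taken from this file:
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream_checkpoint.pt \
#       --model_dump_path ./converted_model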
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
# create a imagenet -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
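# Hypothetical usage sketch, assuming this pipeline de-obfuscates to diffusers'
# DiTPipeline (class-conditional ImageNet generation); the label-lookup method
# defined above would be `get_label_ids` there:
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["golden retriever"])
#   image = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images[0]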
| 675 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""SenseTime/deformable-detr""": """https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json""",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''deformable_detr'''
_lowerCAmelCase : List[Any] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowercase__=True , lowercase__=None , lowercase__=3 , lowercase__=3_0_0 , lowercase__=1_0_2_4 , lowercase__=6 , lowercase__=1_0_2_4 , lowercase__=8 , lowercase__=6 , lowercase__=1_0_2_4 , lowercase__=8 , lowercase__=0.0 , lowercase__=True , lowercase__="relu" , lowercase__=2_5_6 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.0_2 , lowercase__=1.0 , lowercase__=True , lowercase__=False , lowercase__="sine" , lowercase__="resnet50" , lowercase__=True , lowercase__=False , lowercase__=4 , lowercase__=4 , lowercase__=4 , lowercase__=False , lowercase__=3_0_0 , lowercase__=False , lowercase__=1 , lowercase__=5 , lowercase__=2 , lowercase__=1 , lowercase__=1 , lowercase__=5 , lowercase__=2 , lowercase__=0.1 , lowercase__=0.2_5 , lowercase__=False , **lowercase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
__UpperCAmelCase : str = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = backbone_config.get('''model_type''')
__UpperCAmelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
__UpperCAmelCase : Union[str, Any] = config_class.from_dict(lowercase__)
__UpperCAmelCase : List[Any] = use_timm_backbone
__UpperCAmelCase : Union[str, Any] = backbone_config
__UpperCAmelCase : Any = num_channels
__UpperCAmelCase : List[str] = num_queries
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : Optional[int] = d_model
__UpperCAmelCase : List[str] = encoder_ffn_dim
__UpperCAmelCase : str = encoder_layers
__UpperCAmelCase : Union[str, Any] = encoder_attention_heads
__UpperCAmelCase : Tuple = decoder_ffn_dim
__UpperCAmelCase : Any = decoder_layers
__UpperCAmelCase : Union[str, Any] = decoder_attention_heads
__UpperCAmelCase : int = dropout
__UpperCAmelCase : List[Any] = attention_dropout
__UpperCAmelCase : List[str] = activation_dropout
__UpperCAmelCase : Union[str, Any] = activation_function
__UpperCAmelCase : List[Any] = init_std
__UpperCAmelCase : Optional[int] = init_xavier_std
__UpperCAmelCase : Union[str, Any] = encoder_layerdrop
__UpperCAmelCase : List[str] = auxiliary_loss
__UpperCAmelCase : Any = position_embedding_type
__UpperCAmelCase : List[str] = backbone
__UpperCAmelCase : Union[str, Any] = use_pretrained_backbone
__UpperCAmelCase : List[Any] = dilation
# deformable attributes
__UpperCAmelCase : List[Any] = num_feature_levels
__UpperCAmelCase : Optional[Any] = encoder_n_points
__UpperCAmelCase : str = decoder_n_points
__UpperCAmelCase : Any = two_stage
__UpperCAmelCase : int = two_stage_num_proposals
__UpperCAmelCase : Optional[int] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''')
# Hungarian matcher
__UpperCAmelCase : Dict = class_cost
__UpperCAmelCase : List[Any] = bbox_cost
__UpperCAmelCase : Union[str, Any] = giou_cost
# Loss coefficients
__UpperCAmelCase : Tuple = mask_loss_coefficient
__UpperCAmelCase : List[Any] = dice_loss_coefficient
__UpperCAmelCase : int = bbox_loss_coefficient
__UpperCAmelCase : Optional[int] = giou_loss_coefficient
__UpperCAmelCase : Any = eos_coefficient
__UpperCAmelCase : List[Any] = focal_alpha
__UpperCAmelCase : List[str] = disable_custom_kernels
super().__init__(is_encoder_decoder=lowercase__ , **lowercase__)
@property
def A( self):
return self.encoder_attention_heads
@property
def A( self):
return self.d_model
def A( self):
__UpperCAmelCase : int = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
__UpperCAmelCase : List[str] = self.backbone_config.to_dict()
__UpperCAmelCase : int = self.__class__.model_type
return output
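# Minimal instantiation sketch, assuming this config de-obfuscates to
# transformers' DeformableDetrConfig. Note the constraint enforced above:
# two_stage=True requires with_box_refine=True.
#
#   from transformers import DeformableDetrConfig
#
#   config = DeformableDetrConfig(num_queries=300, with_box_refine=True, two_stage=True)
#   config_dict = config.to_dict()  # the nested backbone_config is serialized too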
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""LayoutLMv2FeatureExtractor"""]
lowerCAmelCase = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def fetch_jobs ( location = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url + location).content , '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''' , {'''class''': '''company'''}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
def generate_large_matrix ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
grid = generate_large_matrix()
test_grids = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def validate_grid ( grid ) -> None:
    '''simple docstring'''
    assert all(row == sorted(row , reverse=True) for row in grid)
    assert all(list(col) == sorted(col , reverse=True) for col in zip(*grid))
def find_negative_index ( array ) -> int:
    '''simple docstring'''
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search ( grid ) -> int:
    '''simple docstring'''
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force ( grid ) -> int:
    '''simple docstring'''
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break ( grid ) -> int:
    '''simple docstring'''
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark ( ) -> None:
    '''simple docstring'''
    from timeit import timeit
    print('''Running benchmarks''')
    setup = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search", # took 0.7727 seconds
        "count_negatives_brute_force_with_break", # took 4.6505 seconds
        "count_negatives_brute_force", # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)" , setup=setup , number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
import qiskit
def single_qubit_measure ( qubits , classical_bits ) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend('''aer_simulator''')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
counts = single_qubit_measure(2, 2)
print(F'Total count for various states are: {counts}')
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
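# Standalone usage sketch of the helper exercised above. The keyword meanings
# (e = student encoder layers, d = student decoder layers) are inferred from
# the assertions in these tests:
#
#   import tempfile
#   from make_student import create_student_by_copying_alternating_layers
#
#   student, *_ = create_student_by_copying_alternating_layers(
#       "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
#   )
#   assert student.config.encoder_layers == 1 and student.config.decoder_layers == 1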
| 675 | 1 |
import os
from collections.abc import Iterator
def good_file_paths ( top_dir = "." ) -> Iterator[str]:
    '''simple docstring'''
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename).lstrip('''./''')
def md_prefix ( i ) -> str:
    '''simple docstring'''
    return f"{i * ' '}*" if i else "\n##"
def print_path ( old_path , new_path ) -> str:
    '''simple docstring'''
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_' , ' ').title()}")
    return new_path
def print_directory_md ( top_dir = "." ) -> None:
    '''simple docstring'''
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir)):
        filepath , filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path , filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(''' ''' , '''%20''')
        filename = os.path.splitext(filename.replace('''_''' , ''' ''').title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")
if __name__ == "__main__":
print_directory_md(""".""")
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
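# Sketch of the derived property above, assuming this config de-obfuscates to
# transformers' SEWDConfig and the property to `inputs_to_logits_ratio` (the
# overall stride of the convolutional feature extractor):
#
#   from transformers import SEWDConfig
#
#   config = SEWDConfig()
#   # product of the default conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) = 320
#   print(config.inputs_to_logits_ratio)  # 320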
| 675 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCAmelCase = logging.get_logger(__name__)
def normalize_box ( box , width , height ):
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
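# Worked example of the scaling above, assuming the argument order
# (box, width, height): coordinates are rescaled to thousandths of the image
# size, e.g. box (10, 20, 30, 40) on a 200x100 image:
#
#   1000 * 10 / 200 = 50,  1000 * 20 / 100 = 200
#   1000 * 30 / 200 = 150, 1000 * 40 / 100 = 400
#   -> [50, 200, 150, 400]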
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = to_pil_image(lowercase_ )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = pil_image.size
__UpperCAmelCase : Any = pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
__UpperCAmelCase : Any = [idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
__UpperCAmelCase : Any = [word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
__UpperCAmelCase : int = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
__UpperCAmelCase : List[Any] = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
__UpperCAmelCase : Optional[Any] = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
__UpperCAmelCase : Any = [coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__UpperCAmelCase : Tuple = []
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
__UpperCAmelCase : str = [x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
__UpperCAmelCase : Any = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
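# Note the geometry conversion above: Tesseract reports boxes as
# (left, top, width, height), so e.g. (x=10, y=20, w=100, h=30) becomes the
# corner format [10, 20, 110, 50] before being normalized to the 0-1000 grid.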
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Optional[Any] = ['''pixel_values''']
def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = True , lowercase__ = 1 / 2_5_5 , lowercase__ = True , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = "" , **lowercase__ , ):
super().__init__(**lowercase__)
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
__UpperCAmelCase : Optional[Any] = get_size_dict(lowercase__)
__UpperCAmelCase : int = do_resize
__UpperCAmelCase : List[Any] = size
__UpperCAmelCase : Any = resample
__UpperCAmelCase : int = do_rescale
__UpperCAmelCase : str = rescale_value
__UpperCAmelCase : str = do_normalize
__UpperCAmelCase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase : str = image_std if image_std is not None else IMAGENET_STANDARD_STD
__UpperCAmelCase : Optional[int] = apply_ocr
__UpperCAmelCase : Tuple = ocr_lang
__UpperCAmelCase : Optional[Any] = tesseract_config
def A( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BILINEAR , lowercase__ = None , **lowercase__ , ):
__UpperCAmelCase : Optional[int] = get_size_dict(lowercase__)
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
__UpperCAmelCase : List[Any] = (size['''height'''], size['''width'''])
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__)
def A( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
__UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Tuple = size if size is not None else self.size
__UpperCAmelCase : Optional[int] = get_size_dict(lowercase__)
__UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample
__UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std
__UpperCAmelCase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
__UpperCAmelCase : Optional[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
__UpperCAmelCase : str = tesseract_config if tesseract_config is not None else self.tesseract_config
__UpperCAmelCase : Union[str, Any] = make_list_of_images(lowercase__)
if not valid_images(lowercase__):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')
# All transformations expect numpy arrays.
__UpperCAmelCase : Any = [to_numpy_array(lowercase__) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''')
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Optional[Any] = []
for image in images:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = apply_tesseract(lowercase__ , lowercase__ , lowercase__)
words_batch.append(lowercase__)
boxes_batch.append(lowercase__)
if do_resize:
__UpperCAmelCase : int = [self.resize(image=lowercase__ , size=lowercase__ , resample=lowercase__) for image in images]
if do_rescale:
__UpperCAmelCase : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__) for image in images]
if do_normalize:
__UpperCAmelCase : Tuple = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__) for image in images]
__UpperCAmelCase : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__) for image in images]
__UpperCAmelCase : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=lowercase__)
if apply_ocr:
__UpperCAmelCase : str = words_batch
__UpperCAmelCase : Any = boxes_batch
return data
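    # A minimal usage sketch (hypothetical: written against the un-obfuscated
    # upstream signature, where the constructor accepts `apply_ocr` and the
    # call accepts `return_tensors`):
    #
    #     >>> from PIL import Image
    #     >>> processor = lowerCamelCase(apply_ocr=False)  # skip the pytesseract OCR pass
    #     >>> out = processor(Image.new("RGB", (640, 480)), return_tensors="np")
    #     >>> out["pixel_values"].shape   # resized to the default 224x224, channels first
    #     (1, 3, 224, 224)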
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
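# Note: because index 0 is reserved for "background", each original
# ImageNet-1k id k is remapped to k + 1 by the dict comprehension above,
# giving the 1001 classes that the TensorFlow checkpoints predict.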
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
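# Example invocation (hypothetical script and checkpoint paths; only the
# flags defined above are real):
#
#     python convert_mobilenet_v1_to_pytorch.py \
#         --model_name mobilenet_v1_1.0_224 \
#         --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#         --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf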
| 675 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : str = '''umt5'''
_lowerCAmelCase : Optional[Any] = ['''past_key_values''']
def __init__( self , lowercase__=2_5_0_1_1_2 , lowercase__=5_1_2 , lowercase__=6_4 , lowercase__=1_0_2_4 , lowercase__=8 , lowercase__=None , lowercase__=6 , lowercase__=3_2 , lowercase__=1_2_8 , lowercase__=0.1 , lowercase__=1e-6 , lowercase__=1.0 , lowercase__="gated-gelu" , lowercase__=True , lowercase__=True , lowercase__="T5Tokenizer" , lowercase__=True , lowercase__=0 , lowercase__=1 , lowercase__=0 , **lowercase__ , ):
super().__init__(
is_encoder_decoder=lowercase__ , tokenizer_class=lowercase__ , tie_word_embeddings=lowercase__ , pad_token_id=lowercase__ , eos_token_id=lowercase__ , decoder_start_token_id=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : str = d_model
__UpperCAmelCase : str = d_kv
__UpperCAmelCase : Dict = d_ff
__UpperCAmelCase : Dict = num_layers
__UpperCAmelCase : Optional[Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__UpperCAmelCase : str = num_heads
__UpperCAmelCase : int = relative_attention_num_buckets
__UpperCAmelCase : List[str] = relative_attention_max_distance
__UpperCAmelCase : Tuple = dropout_rate
__UpperCAmelCase : Optional[int] = layer_norm_epsilon
__UpperCAmelCase : str = initializer_factor
__UpperCAmelCase : List[str] = feed_forward_proj
__UpperCAmelCase : Any = use_cache
__UpperCAmelCase : int = self.feed_forward_proj.split('''-''')
__UpperCAmelCase : str = act_info[-1]
__UpperCAmelCase : Tuple = act_info[0] == '''gated'''
if len(lowercase__) > 1 and act_info[0] != "gated" or len(lowercase__) > 2:
raise ValueError(
F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''')
if feed_forward_proj == "gated-gelu":
__UpperCAmelCase : Union[str, Any] = '''gelu_new'''
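        # For example, feed_forward_proj="gated-gelu" splits into
        # ("gated", "gelu"): the activation is taken from the last component,
        # the gating flag from the first, and "gated-gelu" is further
        # special-cased to the "gelu_new" activation (these correspond to the
        # `dense_act_fn` / `is_gated_act` attributes in the upstream T5/UMT5
        # configs).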
@property
def A( self):
return self.d_model
@property
def A( self):
return self.num_heads
@property
def A( self):
return self.num_layers
class lowerCamelCase ( _UpperCamelCase ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def A( self):
__UpperCAmelCase : Dict = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
__UpperCAmelCase : Optional[int] = '''past_encoder_sequence + sequence'''
__UpperCAmelCase : int = {0: '''batch'''}
__UpperCAmelCase : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__UpperCAmelCase : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
__UpperCAmelCase : str = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ , direction='''inputs''')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def A( self):
return 1_3
@property
def A( self):
return 5e-4
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
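    # Behavioural note on the copy helper above: every field value is passed
    # through `copy.deepcopy`, so mutating a dict- or list-valued option on
    # the clone does not leak back into the original config instance.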
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
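# With the `_LazyModule` indirection above, importing this package stays
# cheap: the heavy `modeling_wavlm` submodule is only imported on first
# attribute access (e.g. `WavLMModel`), and only when torch is available.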
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
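# Worked examples for the string-based bitwise OR above (shown with the
# function's placeholder name from this file):
#
#     >>> __SCREAMING_SNAKE_CASE(25, 32)   # 0b11001 | 0b100000
#     '0b111001'
#     >>> __SCREAMING_SNAKE_CASE(37, 50)   # 0b100101 | 0b110010
#     '0b110111'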
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import math
import sys
import cva
import numpy as np
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> np.ndarray:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = math.sqrt(lowercase_ )
__UpperCAmelCase : Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> np.ndarray:
'''simple docstring'''
__UpperCAmelCase : Dict = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> np.ndarray:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((kernel_size, kernel_size) )
for i in range(0 , lowercase_ ):
for j in range(0 , lowercase_ ):
__UpperCAmelCase : List[str] = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(lowercase_ , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> np.ndarray:
'''simple docstring'''
__UpperCAmelCase : List[str] = np.zeros(img.shape )
__UpperCAmelCase : Tuple = get_gauss_kernel(lowercase_ , lowercase_ )
__UpperCAmelCase , __UpperCAmelCase : int = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
__UpperCAmelCase : Any = get_slice(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
__UpperCAmelCase : Optional[int] = img_s - img_s[kernel_size // 2, kernel_size // 2]
__UpperCAmelCase : List[str] = vec_gaussian(lowercase_ , lowercase_ )
__UpperCAmelCase : Tuple = np.multiply(lowercase_ , lowercase_ )
__UpperCAmelCase : Tuple = np.multiply(lowercase_ , lowercase_ )
__UpperCAmelCase : Union[str, Any] = np.sum(lowercase_ ) / np.sum(lowercase_ )
__UpperCAmelCase : Any = val
return imga
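# The double loop above is a direct, per-pixel bilateral filter: each output
# pixel is a weighted mean of its kernel_size x kernel_size neighbourhood,
# where the weight is gauss(spatial distance) * gauss(intensity difference),
# so edges (large intensity jumps) are preserved while flat regions smooth out.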
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> tuple:
'''simple docstring'''
__UpperCAmelCase : int = args[1] if args[1:] else '''../image_data/lena.jpg'''
__UpperCAmelCase : Optional[int] = float(args[2] ) if args[2:] else 1.0
__UpperCAmelCase : Dict = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__UpperCAmelCase : Union[str, Any] = int(args[4] )
__UpperCAmelCase : Union[str, Any] = kernel_size + abs(kernel_size % 2 - 1 )
else:
__UpperCAmelCase : List[Any] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase ,lowerCAmelCase = parse_args(sys.argv)
lowerCAmelCase = cva.imread(filename, 0)
cva.imshow("""input image""", img)
lowerCAmelCase = img / 255
lowerCAmelCase = out.astype("""float32""")
lowerCAmelCase = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowerCAmelCase = out * 255
lowerCAmelCase = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
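# E.g. generate_key("THE GERMAN ATTACK", "SECRET") cycles the key until it is
# as long as the message (spaces included): "SECRETSECRETSECRE".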
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = True , lowercase__ = "arrow" , **lowercase__ , ):
super().__init__(
split=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Union[str, Any] = load_from_cache_file
__UpperCAmelCase : Dict = file_format
__UpperCAmelCase : Tuple = Spark(
df=lowercase__ , features=lowercase__ , cache_dir=lowercase__ , working_dir=lowercase__ , **lowercase__ , )
def A( self):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
__UpperCAmelCase : Optional[int] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
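    # In short, the read method above either wraps the Spark DataFrame as a
    # streaming dataset (no materialisation) or runs `download_and_prepare`
    # to write the rows into the cache in `file_format` ("arrow" by default)
    # and then loads the requested split.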
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
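# Concrete check of the histogram bookkeeping above: with num_labels=2,
# ignore_index=255, pred = [[0, 1], [1, 1]] and label = [[0, 1], [0, 1]],
# area_intersect = [1, 2], area_pred_label = [1, 3], area_label = [2, 2],
# hence area_union = [2, 3] and per-class IoU of 1/2 and 2/3.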
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
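        # Both cache tests above assert the same invariant: running the model
        # on the full sequence at once and running it incrementally with
        # past_key_values must yield numerically close hidden states for the
        # newly added positions.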
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
        # Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Calculate the rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
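# Rolling-hash recurrence used above, with base b = alphabet_size = 256,
# modulus p = 1_000_003 and pattern length m: the window hash is
# H(i) = sum_k ord(text[i + k]) * b**(m - 1 - k) (mod p), updated in O(1) as
#     H(i + 1) = (H(i) - ord(text[i]) * b**(m - 1)) * b + ord(text[i + m])
# where `modulus_power` caches b**(m - 1) mod p.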
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=3_2 , lowercase__=2 , lowercase__=3 , lowercase__=1_6 , lowercase__=[1, 2, 1] , lowercase__=[2, 2, 4] , lowercase__=2 , lowercase__=2.0 , lowercase__=True , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.1 , lowercase__="gelu" , lowercase__=False , lowercase__=True , lowercase__=0.0_2 , lowercase__=1e-5 , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=1_0 , lowercase__=8 , ):
__UpperCAmelCase : List[str] = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Union[str, Any] = image_size
__UpperCAmelCase : Dict = patch_size
__UpperCAmelCase : Dict = num_channels
__UpperCAmelCase : List[str] = embed_dim
__UpperCAmelCase : int = depths
__UpperCAmelCase : Optional[int] = num_heads
__UpperCAmelCase : str = window_size
__UpperCAmelCase : Optional[int] = mlp_ratio
__UpperCAmelCase : Tuple = qkv_bias
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : int = drop_path_rate
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : int = use_absolute_embeddings
__UpperCAmelCase : Any = patch_norm
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : str = is_training
__UpperCAmelCase : str = scope
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : Tuple = encoder_stride
def A( self):
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Any = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def A( self):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = SwinvaModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Tuple = model(lowercase__)
__UpperCAmelCase : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
__UpperCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
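        # Sanity check of the shape assertions above with this tester's
        # defaults (image_size=32, patch_size=2, depths=[1, 2, 1], embed_dim=16):
        # seq_len = (32 // 2) ** 2 // 4 ** 2 = 16 and dim = 16 * 2 ** 2 = 64.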
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = SwinvaForMaskedImageModeling(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__UpperCAmelCase : str = 1
__UpperCAmelCase : Tuple = SwinvaForMaskedImageModeling(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__UpperCAmelCase : Dict = model(lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = self.type_sequence_label_size
__UpperCAmelCase : int = SwinvaForImageClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def A( self):
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs
__UpperCAmelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Any = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_lowerCAmelCase : Union[str, Any] = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Dict = False
_lowerCAmelCase : str = False
_lowerCAmelCase : Optional[int] = False
def A( self):
__UpperCAmelCase : str = SwinvaModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , embed_dim=3_7)
def A( self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''')
def A( self):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''')
def A( self):
pass
def A( self):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(lowercase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__UpperCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase__ , nn.Linear))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(lowercase__)
__UpperCAmelCase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
__UpperCAmelCase : Optional[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : str = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Dict = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(lowercase__ , lowercase__))
__UpperCAmelCase : Tuple = outputs.attentions
__UpperCAmelCase : Dict = len(self.model_tester.depths)
self.assertEqual(len(lowercase__) , lowercase__)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : Optional[Any] = config.window_size**2
__UpperCAmelCase : Tuple = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(lowercase__ , lowercase__))
__UpperCAmelCase : Optional[int] = outputs.attentions
self.assertEqual(len(lowercase__) , lowercase__)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
__UpperCAmelCase : int = len(lowercase__)
# Check attention is always last and order is fine
__UpperCAmelCase : Dict = True
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : int = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
__UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(lowercase__ , lowercase__))
if hasattr(self.model_tester , '''num_hidden_states_types'''):
__UpperCAmelCase : str = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
__UpperCAmelCase : List[Any] = 2
self.assertEqual(out_len + added_hidden_states , len(lowercase__))
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(lowercase__) , lowercase__)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(lowercase__ , lowercase__))
__UpperCAmelCase : Optional[int] = outputs.hidden_states
__UpperCAmelCase : str = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths) + 1)
self.assertEqual(len(lowercase__) , lowercase__)
# Swinv2 has a different seq_length
__UpperCAmelCase : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
__UpperCAmelCase : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(lowercase__) , lowercase__)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = reshaped_hidden_states[0].shape
__UpperCAmelCase : Union[str, Any] = (
reshaped_hidden_states[0].view(lowercase__ , lowercase__ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase : Any = True
self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , lowercase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
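        # patch_size does not evenly divide the image size here, so the model pads inputs
        # up to the next multiple; hidden states are checked against that padded resolution.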
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
self.check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ , (padded_height, padded_width))
def A( self):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__)
@slow
def A( self):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = SwinvaModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = _config_zero_init(lowercase__)
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = model_class(config=lowercase__)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''')
if is_vision_available()
else None
)
@slow
def A( self):
__UpperCAmelCase : Any = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''').to(
lowercase__)
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
__UpperCAmelCase : List[Any] = image_processor(images=lowercase__ , return_tensors='''pt''').to(lowercase__)
# forward pass
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(**lowercase__)
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , lowercase__)
__UpperCAmelCase : Any = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        __UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
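    # i, j, k are advanced past elements whose 2-, 3- and 5-multiples are already in the
    # list; each iteration then appends the smallest remaining candidate.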
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = 0
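    # ``bitmap`` acts as a set of code points: bit n is set once chr(n) has been seen.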
for ch in input_str:
__UpperCAmelCase : Union[str, Any] = ord(lowercase_ )
__UpperCAmelCase : str = pow(2 , lowercase_ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
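    # Counts positions at which two equal-length strings differ,
    # e.g. hamming_distance("karolin", "kathrin") == 3.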
if len(lowercase_ ) != len(lowercase_ ):
raise ValueError('''String lengths must match!''' )
__UpperCAmelCase : int = 0
    for char_a, char_b in zip(lowercase_ , lowercase_ ):
        if char_a != char_b:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
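    # Tests without an explicit ``integration`` or ``unit`` marker default to unit tests.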
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
__UpperCAmelCase : int = flax_key_tuple[:-1] + ('''weight''',)
__UpperCAmelCase : Optional[Any] = torch.permute(lowercase_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowercase_ ):
# linear layer
__UpperCAmelCase : List[str] = flax_key_tuple[:-1] + ('''weight''',)
__UpperCAmelCase : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__UpperCAmelCase : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if "metadata" in layer:
__UpperCAmelCase : Any = layer.split('''metadata''' )
__UpperCAmelCase : Optional[int] = ''''''.join(split_layer[0] )[:-1]
__UpperCAmelCase : List[str] = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
__UpperCAmelCase : Any = layer.split('''kvstore''' )
__UpperCAmelCase : int = ''''''.join(split_layer[0] )[:-1]
__UpperCAmelCase : List[str] = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
__UpperCAmelCase : List[str] = layer.split('''/''' )
__UpperCAmelCase : Union[str, Any] = '''/'''.join(split_layer[:-1] )
__UpperCAmelCase : Union[str, Any] = (split_layer[-1],)
if "kvstore/path" in layer:
__UpperCAmelCase : str = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
__UpperCAmelCase : Tuple = '''file'''
else:
__UpperCAmelCase : Optional[int] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : List[Any] = rename_keys(lowercase_ )
__UpperCAmelCase : Any = {}
for k, v in current_block.items():
__UpperCAmelCase : List[Any] = v
__UpperCAmelCase : int = new_current_block
torch.save(lowercase_ , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = WEIGHTS_NAME ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = convert_file_size_to_int(lowercase_ )
__UpperCAmelCase : List[str] = []
__UpperCAmelCase : int = {}
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : Dict = 0
os.makedirs(lowercase_ , exist_ok=lowercase_ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
__UpperCAmelCase : Optional[int] = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
__UpperCAmelCase : List[Any] = flatten_dict(lowercase_ , sep='''/''' )
__UpperCAmelCase : Dict = {}
for layer in checkpoint_info.keys():
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = get_key_and_tensorstore_dict(
lowercase_ , lowercase_ , lowercase_ )
if curr_real_layer_name in all_layers:
__UpperCAmelCase : List[str] = content
else:
__UpperCAmelCase : Tuple = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
__UpperCAmelCase : Dict = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
__UpperCAmelCase : Optional[int] = torch.tensor(lowercase_ )
__UpperCAmelCase : Tuple = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
__UpperCAmelCase , __UpperCAmelCase : Tuple = rename_base_flax_keys(tuple(key.split('''/''' ) ) , lowercase_ )
__UpperCAmelCase : Dict = '''/'''.join(lowercase_ )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
__UpperCAmelCase : int = os.path.join(
lowercase_ , weights_name.replace('''.bin''' , f"-{len(lowercase_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
__UpperCAmelCase : List[Any] = {}
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Optional[Any] = raw_weights.to(getattr(lowercase_ , lowercase_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
__UpperCAmelCase : Any = os.path.join(lowercase_ , weights_name.replace('''.bin''' , f"-{len(lowercase_ )+1:05d}-of-???.bin" ) )
rename_and_save_block(lowercase_ , lowercase_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(lowercase_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
__UpperCAmelCase : str = {}
__UpperCAmelCase : str = {}
for idx, shard in enumerate(lowercase_ ):
__UpperCAmelCase : List[Any] = weights_name.replace(
'''.bin''' , f"-{idx+1:05d}-of-{len(lowercase_ ):05d}.bin" ) # len(sharded_state_dicts):05d}
__UpperCAmelCase : str = os.path.join(lowercase_ , weights_name.replace('''.bin''' , f"-{idx+1:05d}-of-???.bin" ) )
os.rename(lowercase_ , os.path.join(lowercase_ , lowercase_ ) )
__UpperCAmelCase : int = shard
for key in shard:
__UpperCAmelCase : List[str] = shard_file
# Add the metadata
__UpperCAmelCase : Optional[Any] = {'''total_size''': total_size}
__UpperCAmelCase : Tuple = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(lowercase_ , lowercase_ ) , '''w''' , encoding='''utf-8''' ) as f:
__UpperCAmelCase : Dict = json.dumps(lowercase_ , indent=2 , sort_keys=lowercase_ ) + '''\n'''
f.write(lowercase_ )
return metadata, index
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCAmelCase = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
__UpperCAmelCase : int = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
__UpperCAmelCase : List[str] = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
__UpperCAmelCase : List[str] = TaTokenizer.from_pretrained('''t5-small''' )
__UpperCAmelCase : List[str] = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
__UpperCAmelCase : Optional[Any] = tokenizer(lowercase_ , return_tensors='''pt''' ).input_ids
__UpperCAmelCase : Optional[int] = model.generate(lowercase_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
'''simple docstring'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
'''simple docstring'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
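    # Binary search a non-increasing row for the index of its first negative value;
    # returns len(array) when the row contains no negatives.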
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
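    # Columns are also sorted in decreasing order, so the first negative index never
    # moves right from one row to the next; ``bound`` therefore caps each row's search.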
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 675 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
for char in word:
__UpperCAmelCase : Optional[int] = ord(lowercase_ )
if not _is_chinese_char(lowercase_ ):
return 0
return 1
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[Any] = set()
for token in tokens:
__UpperCAmelCase : int = len(lowercase_ ) > 1 and is_chinese(lowercase_ )
if chinese_word:
word_set.add(lowercase_ )
__UpperCAmelCase : Union[str, Any] = list(lowercase_ )
return word_list
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
if not chinese_word_set:
return bert_tokens
__UpperCAmelCase : List[str] = max([len(lowercase_ ) for w in chinese_word_set] )
__UpperCAmelCase : Tuple = bert_tokens
__UpperCAmelCase , __UpperCAmelCase : List[Any] = 0, len(lowercase_ )
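    # Greedy longest-match over the BERT tokens: continuation pieces of each matched
    # Chinese word are prefixed with '##'.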
while start < end:
__UpperCAmelCase : List[str] = True
if is_chinese(bert_word[start] ):
__UpperCAmelCase : Tuple = min(end - start , lowercase_ )
for i in range(lowercase_ , 1 , -1 ):
__UpperCAmelCase : Optional[Any] = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
__UpperCAmelCase : Optional[Any] = '''##''' + bert_word[j]
__UpperCAmelCase : str = start + i
__UpperCAmelCase : List[Any] = False
break
if single_word:
start += 1
return bert_word
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = []
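    # Run LTP segmentation in batches of 100 lines to keep memory bounded.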
for i in range(0 , len(lowercase_ ) , 100 ):
__UpperCAmelCase : List[str] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
__UpperCAmelCase : List[Any] = [get_chinese_word(lowercase_ ) for r in res]
ltp_res.extend(lowercase_ )
assert len(lowercase_ ) == len(lowercase_ )
__UpperCAmelCase : List[str] = []
for i in range(0 , len(lowercase_ ) , 100 ):
__UpperCAmelCase : Any = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowercase_ , truncation=lowercase_ , max_length=512 )
bert_res.extend(res['''input_ids'''] )
assert len(lowercase_ ) == len(lowercase_ )
__UpperCAmelCase : List[str] = []
for input_ids, chinese_word in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase : Optional[int] = []
for id in input_ids:
__UpperCAmelCase : str = bert_tokenizer._convert_id_to_token(lowercase_ )
input_tokens.append(lowercase_ )
__UpperCAmelCase : int = add_sub_symbol(lowercase_ , lowercase_ )
__UpperCAmelCase : Optional[Any] = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowercase_ ):
if token[:2] == "##":
__UpperCAmelCase : List[str] = token[2:]
# save chinese tokens' pos
if len(lowercase_ ) == 1 and _is_chinese_char(ord(lowercase_ ) ):
ref_id.append(lowercase_ )
ref_ids.append(lowercase_ )
assert len(lowercase_ ) == len(lowercase_ )
return ref_ids
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Any:
'''simple docstring'''
with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
__UpperCAmelCase : List[Any] = f.readlines()
__UpperCAmelCase : Tuple = [line.strip() for line in data if len(lowercase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
__UpperCAmelCase : List[str] = LTP(args.ltp ) # faster in GPU device
__UpperCAmelCase : Dict = BertTokenizer.from_pretrained(args.bert )
__UpperCAmelCase : Tuple = prepare_ref(lowercase_ , lowercase_ , lowercase_ )
with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
__UpperCAmelCase : str = [json.dumps(lowercase_ ) + '''\n''' for ref in ref_ids]
f.writelines(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
lowerCAmelCase = parser.parse_args()
main(args)
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
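    # Each letter is shifted by the negative of the key letter, modulo 26; spaces pass
    # through unchanged and do not advance the key index. Decryption adds the key back.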
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
__UpperCAmelCase , __UpperCAmelCase : Tuple = model(lowercase__ , attention_mask=lowercase__).to_tuple()
# create hypothetical next token and extent to next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
__UpperCAmelCase , __UpperCAmelCase : Tuple = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
# Define PAD Token = EOS Token = 50256
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
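# Illustrative note (added for clarity): the mapping returned above drives the
# dynamic axes of an ONNX export, e.g. {"input_ids": {0: "batch", 1: "sequence"}},
# so batch size and sequence length remain symbolic in the exported graph.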
| 675 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = ['''image_processor''', '''tokenizer''']
_lowerCAmelCase : Optional[int] = '''LayoutLMv2ImageProcessor'''
_lowerCAmelCase : str = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
def __init__( self , lowercase__=None , lowercase__=None , **lowercase__):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , lowercase__ , )
__UpperCAmelCase : List[Any] = kwargs.pop('''feature_extractor''')
__UpperCAmelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(lowercase__ , lowercase__)
def __call__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = True , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = 0 , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = False , lowercase__ = True , lowercase__ = None , **lowercase__ , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''')
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''')
# first, apply the image processor
__UpperCAmelCase : str = self.image_processor(images=lowercase__ , return_tensors=lowercase__)
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
__UpperCAmelCase : Optional[int] = features['''words''']
__UpperCAmelCase : Any = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , stride=lowercase__ , pad_to_multiple_of=lowercase__ , return_token_type_ids=lowercase__ , return_attention_mask=lowercase__ , return_overflowing_tokens=lowercase__ , return_special_tokens_mask=lowercase__ , return_offsets_mapping=lowercase__ , return_length=lowercase__ , verbose=lowercase__ , return_tensors=lowercase__ , **lowercase__ , )
# add pixel values
__UpperCAmelCase : int = features.pop('''pixel_values''')
if return_overflowing_tokens is True:
__UpperCAmelCase : str = self.get_overflowing_images(lowercase__ , encoded_inputs['''overflow_to_sample_mapping'''])
__UpperCAmelCase : int = images
return encoded_inputs
def A( self , lowercase__ , lowercase__):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__UpperCAmelCase : Dict = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx])
if len(lowercase__) != len(lowercase__):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F" {len(lowercase__)} and {len(lowercase__)}")
return images_with_overflow
def A( self , *lowercase__ , **lowercase__):
return self.tokenizer.batch_decode(*lowercase__ , **lowercase__)
def A( self , *lowercase__ , **lowercase__):
return self.tokenizer.decode(*lowercase__ , **lowercase__)
@property
def A( self):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def A( self):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowercase__ , )
return self.image_processor_class
@property
def A( self):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowercase__ , )
return self.image_processor
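# Illustrative usage sketch (added for clarity; assumes the public LayoutXLM
# checkpoint and an image processor initialized with apply_ocr=True):
# from transformers import LayoutXLMProcessor
# processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
# encoding = processor(image, return_tensors="pt")  # OCR -> words/boxes -> token ids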
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
        ##Variables for cluster assignments of individual vectors (initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
        ##Now let's construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
        __UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )  # tf.sub was renamed to tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
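# Illustrative sketch (not part of the original script): the same
# Expectation/Maximization loop as above, written with plain NumPy instead of
# a TF1 graph. `vectors` is assumed to be an (n, dim) array-like and
# `noofclusters` a small positive int; names here are hypothetical.
import numpy as np

def kmeans_numpy(vectors, noofclusters, iterations=100):
    vectors = np.asarray(vectors, dtype=float)
    rng = np.random.default_rng(0)
    # pick the initial centroids from the data points, without replacement
    centroids = vectors[rng.choice(len(vectors), noofclusters, replace=False)]
    for _ in range(iterations):
        # Expectation: assign every vector to its nearest centroid
        distances = np.linalg.norm(vectors[:, None] - centroids[None, :], axis=2)
        assignments = distances.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members
        for k in range(noofclusters):
            members = vectors[assignments == k]
            if len(members):
                centroids[k] = members.mean(axis=0)
    return centroids, assignments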
| 675 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = (DEISMultistepScheduler,)
_lowerCAmelCase : List[Any] = (('''num_inference_steps''', 2_5),)
def A( self , **lowercase__):
__UpperCAmelCase : Union[str, Any] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
}
config.update(**lowercase__)
return config
def A( self , lowercase__=0 , **lowercase__):
__UpperCAmelCase : List[str] = dict(self.forward_default_kwargs)
__UpperCAmelCase : Optional[int] = kwargs.pop('''num_inference_steps''' , lowercase__)
__UpperCAmelCase : int = self.dummy_sample
__UpperCAmelCase : Union[str, Any] = 0.1 * sample
__UpperCAmelCase : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : List[str] = self.get_scheduler_config(**lowercase__)
__UpperCAmelCase : int = scheduler_class(**lowercase__)
scheduler.set_timesteps(lowercase__)
# copy over dummy past residuals
__UpperCAmelCase : str = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__)
__UpperCAmelCase : str = scheduler_class.from_pretrained(lowercase__)
new_scheduler.set_timesteps(lowercase__)
# copy over dummy past residuals
__UpperCAmelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = sample, sample
for t in range(lowercase__ , time_step + scheduler.config.solver_order + 1):
__UpperCAmelCase : Any = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
__UpperCAmelCase : Optional[Any] = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def A( self):
pass
def A( self , lowercase__=0 , **lowercase__):
__UpperCAmelCase : Optional[int] = dict(self.forward_default_kwargs)
__UpperCAmelCase : List[str] = kwargs.pop('''num_inference_steps''' , lowercase__)
__UpperCAmelCase : Tuple = self.dummy_sample
__UpperCAmelCase : str = 0.1 * sample
__UpperCAmelCase : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : List[str] = self.get_scheduler_config()
__UpperCAmelCase : List[Any] = scheduler_class(**lowercase__)
scheduler.set_timesteps(lowercase__)
# copy over dummy past residuals (must be after setting timesteps)
__UpperCAmelCase : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__)
__UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(lowercase__)
# copy over dummy past residuals
new_scheduler.set_timesteps(lowercase__)
# copy over dummy past residual (must be after setting timesteps)
__UpperCAmelCase : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCAmelCase : Tuple = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
__UpperCAmelCase : str = new_scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def A( self , lowercase__=None , **lowercase__):
if scheduler is None:
__UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
__UpperCAmelCase : List[Any] = self.get_scheduler_config(**lowercase__)
__UpperCAmelCase : List[Any] = scheduler_class(**lowercase__)
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : List[str] = self.get_scheduler_config(**lowercase__)
__UpperCAmelCase : int = scheduler_class(**lowercase__)
__UpperCAmelCase : List[str] = 1_0
__UpperCAmelCase : List[Any] = self.dummy_model()
__UpperCAmelCase : str = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__)
for i, t in enumerate(scheduler.timesteps):
__UpperCAmelCase : Dict = model(lowercase__ , lowercase__)
__UpperCAmelCase : Dict = scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
return sample
def A( self):
__UpperCAmelCase : Any = dict(self.forward_default_kwargs)
__UpperCAmelCase : Any = kwargs.pop('''num_inference_steps''' , lowercase__)
for scheduler_class in self.scheduler_classes:
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : List[Any] = scheduler_class(**lowercase__)
__UpperCAmelCase : Tuple = self.dummy_sample
__UpperCAmelCase : Tuple = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ , '''set_timesteps'''):
scheduler.set_timesteps(lowercase__)
elif num_inference_steps is not None and not hasattr(lowercase__ , '''set_timesteps'''):
__UpperCAmelCase : List[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCAmelCase : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
__UpperCAmelCase : Any = dummy_past_residuals[: scheduler.config.solver_order]
__UpperCAmelCase : List[str] = scheduler.timesteps[5]
__UpperCAmelCase : str = scheduler.timesteps[6]
__UpperCAmelCase : int = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
__UpperCAmelCase : List[Any] = scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def A( self):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__UpperCAmelCase : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config())
__UpperCAmelCase : Optional[Any] = self.full_loop(scheduler=lowercase__)
__UpperCAmelCase : Tuple = torch.mean(torch.abs(lowercase__))
assert abs(result_mean.item() - 0.2_3_9_1_6) < 1e-3
__UpperCAmelCase : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config)
__UpperCAmelCase : Tuple = DPMSolverMultistepScheduler.from_config(scheduler.config)
__UpperCAmelCase : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config)
__UpperCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config)
__UpperCAmelCase : Tuple = self.full_loop(scheduler=lowercase__)
__UpperCAmelCase : str = torch.mean(torch.abs(lowercase__))
assert abs(result_mean.item() - 0.2_3_9_1_6) < 1e-3
def A( self):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__)
def A( self):
self.check_over_configs(thresholding=lowercase__)
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , algorithm_type='''deis''' , solver_order=lowercase__ , solver_type=lowercase__ , )
def A( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__)
def A( self):
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , algorithm_type=lowercase__ , )
__UpperCAmelCase : str = self.full_loop(
solver_order=lowercase__ , solver_type=lowercase__ , prediction_type=lowercase__ , algorithm_type=lowercase__ , )
assert not torch.isnan(lowercase__).any(), "Samples have nan numbers"
def A( self):
self.check_over_configs(lower_order_final=lowercase__)
self.check_over_configs(lower_order_final=lowercase__)
def A( self):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=lowercase__ , time_step=0)
def A( self):
__UpperCAmelCase : Any = self.full_loop()
__UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowercase__))
assert abs(result_mean.item() - 0.2_3_9_1_6) < 1e-3
def A( self):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type='''v_prediction''')
__UpperCAmelCase : Tuple = torch.mean(torch.abs(lowercase__))
assert abs(result_mean.item() - 0.0_9_1) < 1e-3
def A( self):
__UpperCAmelCase : str = self.scheduler_classes[0]
__UpperCAmelCase : Any = self.get_scheduler_config(thresholding=lowercase__ , dynamic_thresholding_ratio=0)
__UpperCAmelCase : Dict = scheduler_class(**lowercase__)
__UpperCAmelCase : str = 1_0
__UpperCAmelCase : Union[str, Any] = self.dummy_model()
__UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowercase__)
for i, t in enumerate(scheduler.timesteps):
__UpperCAmelCase : Optional[Any] = model(lowercase__ , lowercase__)
__UpperCAmelCase : Dict = scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
assert sample.dtype == torch.floataa
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
__UpperCAmelCase , __UpperCAmelCase : int = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
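# Illustrative restatement (added for clarity) of the recurrence implemented
# above, with explicit names: at each element we either take it (so the
# previous element must be excluded) or skip it and carry the best previous
# state. E.g. [1, 2, 4, 5, 9, 5] -> 1 + 4 + 9 = 14.
def _max_non_adjacent_sum_reference(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including, max_excluding = nums[0], 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)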
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''deit.embeddings.cls_token'''),
('''dist_token''', '''deit.embeddings.distillation_token'''),
('''patch_embed.proj.weight''', '''deit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''deit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''deit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
__UpperCAmelCase : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('''deit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
('''norm.weight''', '''deit.layernorm.weight'''),
('''norm.bias''', '''deit.layernorm.bias'''),
('''head.weight''', '''cls_classifier.weight'''),
('''head.bias''', '''cls_classifier.bias'''),
('''head_dist.weight''', '''distillation_classifier.weight'''),
('''head_dist.bias''', '''distillation_classifier.bias'''),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
__UpperCAmelCase : List[str] = ''''''
else:
__UpperCAmelCase : List[Any] = '''deit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__UpperCAmelCase : Optional[int] = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
__UpperCAmelCase : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
__UpperCAmelCase : str = in_proj_bias[: config.hidden_size]
__UpperCAmelCase : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__UpperCAmelCase : List[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__UpperCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
__UpperCAmelCase : Optional[Any] = in_proj_bias[-config.hidden_size :]
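# Illustrative note (added for clarity): timm stores attention as one fused
# (3 * hidden, hidden) qkv projection; the loop above slices it into thirds:
#   q = qkv[:hidden], k = qkv[hidden : 2 * hidden], v = qkv[-hidden:]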
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = dct.pop(lowercase_ )
__UpperCAmelCase : Any = val
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Union[str, Any] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Dict = DeiTConfig()
# all deit models have fine-tuned heads
__UpperCAmelCase : List[str] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
__UpperCAmelCase : Tuple = 1000
__UpperCAmelCase : str = '''huggingface/label-files'''
__UpperCAmelCase : Tuple = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : List[Any] = {int(lowercase_ ): v for k, v in idalabel.items()}
__UpperCAmelCase : Union[str, Any] = idalabel
__UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : Any = int(deit_name[-6:-4] )
__UpperCAmelCase : List[str] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith('''tiny''' ):
__UpperCAmelCase : Tuple = 192
__UpperCAmelCase : Union[str, Any] = 768
__UpperCAmelCase : Optional[int] = 12
__UpperCAmelCase : str = 3
elif deit_name[9:].startswith('''small''' ):
__UpperCAmelCase : Tuple = 384
__UpperCAmelCase : List[Any] = 1536
__UpperCAmelCase : Optional[int] = 12
__UpperCAmelCase : Union[str, Any] = 6
if deit_name[9:].startswith('''base''' ):
pass
elif deit_name[4:].startswith('''large''' ):
__UpperCAmelCase : Optional[Any] = 1024
__UpperCAmelCase : Union[str, Any] = 4096
__UpperCAmelCase : Optional[int] = 24
__UpperCAmelCase : Optional[int] = 16
# load original model from timm
__UpperCAmelCase : Union[str, Any] = timm.create_model(lowercase_ , pretrained=lowercase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
__UpperCAmelCase : Tuple = timm_model.state_dict()
__UpperCAmelCase : Optional[Any] = create_rename_keys(lowercase_ , lowercase_ )
for src, dest in rename_keys:
rename_key(lowercase_ , lowercase_ , lowercase_ )
read_in_q_k_v(lowercase_ , lowercase_ , lowercase_ )
# load HuggingFace model
__UpperCAmelCase : Tuple = DeiTForImageClassificationWithTeacher(lowercase_ ).eval()
model.load_state_dict(lowercase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
__UpperCAmelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
__UpperCAmelCase : Any = DeiTImageProcessor(size=lowercase_ , crop_size=config.image_size )
__UpperCAmelCase : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : List[str] = encoding['''pixel_values''']
__UpperCAmelCase : Optional[Any] = model(lowercase_ )
__UpperCAmelCase : int = timm_model(lowercase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase_ , outputs.logits , atol=1e-3 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {deit_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--deit_name""",
default="""vit_deit_base_distilled_patch16_224""",
type=str,
help="""Name of the DeiT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowerCAmelCase = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
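# Illustrative invocation (the script file name is hypothetical):
# python convert_deit_timm_to_pytorch.py \
#     --deit_name vit_deit_base_distilled_patch16_224 \
#     --pytorch_dump_folder_path ./deit-base-distilled-patch16-224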
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
        # This is an audio recording of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class lowerCamelCase ( datasets.BuilderConfig ):
_lowerCAmelCase : Optional[datasets.Features] = None
class lowerCamelCase ( datasets.ArrowBasedBuilder ):
_lowerCAmelCase : Union[str, Any] = PandasConfig
def A( self):
return datasets.DatasetInfo(features=self.config.features)
def A( self , lowercase__):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}")
__UpperCAmelCase : int = dl_manager.download_and_extract(self.config.data_files)
if isinstance(lowercase__ , (str, list, tuple)):
__UpperCAmelCase : str = data_files
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : List[Any] = [dl_manager.iter_files(lowercase__) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
__UpperCAmelCase : List[str] = []
for split_name, files in data_files.items():
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
__UpperCAmelCase : Dict = [dl_manager.iter_files(lowercase__) for file in files]
splits.append(datasets.SplitGenerator(name=lowercase__ , gen_kwargs={'''files''': files}))
return splits
def A( self , lowercase__):
if self.config.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
__UpperCAmelCase : str = table_cast(lowercase__ , self.config.features.arrow_schema)
return pa_table
def A( self , lowercase__):
for i, file in enumerate(itertools.chain.from_iterable(lowercase__)):
with open(lowercase__ , '''rb''') as f:
__UpperCAmelCase : Any = pa.Table.from_pandas(pd.read_pickle(lowercase__))
yield i, self._cast_table(lowercase__)
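# Illustrative usage sketch (added for clarity; assumes the standard packaged
# "pandas" loader entry point of the datasets library):
# from datasets import load_dataset
# ds = load_dataset("pandas", data_files={"train": "frame.pkl"})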
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
        # create an ImageNet -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
__UpperCAmelCase , __UpperCAmelCase : Any = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
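# Illustrative usage sketch (added for clarity; "facebook/DiT-XL-2-256" is a
# published checkpoint, the rest assumes the upstream DiTPipeline interface):
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
# ids = pipe.get_label_ids(["white shark"])  # label string -> ImageNet class id
# image = pipe(class_labels=ids, num_inference_steps=25).images[0]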
| 675 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Union[str, Any] = '''roformer'''
def __init__( self , lowercase__=5_0_0_0_0 , lowercase__=None , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_5_3_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__=False , lowercase__=True , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = vocab_size
__UpperCAmelCase : Union[str, Any] = hidden_size if embedding_size is None else embedding_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : List[str] = initializer_range
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : List[Any] = rotary_value
__UpperCAmelCase : Dict = use_cache
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
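# Illustrative sketch (added for clarity) of the color quantization the
# processor performs: each pixel, normalized to [-1, 1], is mapped to the index
# of its nearest cluster center, assuming `clusters` is a (k, 3) float array.
# def color_quantize(pixels, clusters):
#     d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
#     return d.argmin(-1)  # one token id in [0, k) per pixel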
| 675 | 1 |
# Size of the alphabet used for hashing (extended ASCII)
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
'''simple docstring'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
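# Illustrative sketch (added for clarity): the O(1) rolling-hash update used in
# the loop above, written out in isolation with explicit constants matching the
# module-level values (256-character alphabet, modulus 1_000_003).
def _roll_hash(h: int, old: str, new: str, p_len: int,
               alphabet_size: int = 256, modulus: int = 1_000_003) -> int:
    # weight of the leading character in the current window's hash
    lead_power = pow(alphabet_size, p_len - 1, modulus)
    return ((h - ord(old) * lead_power) * alphabet_size + ord(new)) % modulus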
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCAmelCase = """src/transformers"""
lowerCAmelCase = """docs/source/en/tasks"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
with open(lowercase_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase : int = f.readlines()
# Find the start prompt.
__UpperCAmelCase : str = 0
while not lines[start_index].startswith(lowercase_ ):
start_index += 1
start_index += 1
__UpperCAmelCase : str = start_index
while not lines[end_index].startswith(lowercase_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
lowerCAmelCase = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCAmelCase = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = TASK_GUIDE_TO_MODELS[task_guide]
__UpperCAmelCase : Optional[int] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(lowercase_ , set() )
__UpperCAmelCase : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()] ) + "\n"
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=False ) -> int:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = _find_text_in_file(
filename=os.path.join(lowercase_ , lowercase_ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__UpperCAmelCase : int = get_model_list_for_task(lowercase_ )
if current_list != new_list:
if overwrite:
with open(os.path.join(lowercase_ , lowercase_ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
''' to fix this.''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCAmelCase = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
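# Quick sanity check (illustrative, assuming the default movq scale factor
# of 8): downscale_height_and_width(768, 768, 8) computes 768 // 8**2 = 12
# latent blocks per side and returns (12 * 8, 12 * 8) = (96, 96), i.e. the
# UNet operates on a 96x96 latent for a 768x768 output image. Sizes that do
# not divide evenly are rounded up to the next multiple of the scale factor.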
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
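                # Explanatory note: with a learned-variance scheduler the UNet emits
                # twice the latent channels. The split above keeps the epsilon half
                # for classifier-free guidance, and the text-conditioned variance
                # half is re-attached so the scheduler step can still consume it.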
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase = False
class lowerCamelCase ( unittest.TestCase ):
def A( self , lowercase__=3_2):
set_seed(0)
__UpperCAmelCase : Dict = UNetaDModel(sample_size=lowercase__ , in_channels=3 , out_channels=3)
__UpperCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1)
return model, optimizer
@slow
def A( self):
__UpperCAmelCase : Any = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
__UpperCAmelCase : Any = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=lowercase__ , )
__UpperCAmelCase : Union[str, Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='''linear''' , clip_sample=lowercase__ , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0)
__UpperCAmelCase : str = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 , 1).to(lowercase__) for _ in range(4)]
__UpperCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2)).to(lowercase__) for _ in range(4)]
__UpperCAmelCase : Any = [torch.randint(0 , 1_0_0_0 , (4,)).long().to(lowercase__) for _ in range(4)]
# train with a DDPM scheduler
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.get_model_optimizer(resolution=3_2)
model.train().to(lowercase__)
for i in range(4):
optimizer.zero_grad()
__UpperCAmelCase : Optional[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
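            # add_noise applies the DDPM forward process,
            # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
            # so the MSE loss below trains the model to predict the injected noise.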
__UpperCAmelCase : Dict = model(lowercase__ , timesteps[i]).sample
__UpperCAmelCase : Dict = torch.nn.functional.mse_loss(lowercase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2)
model.train().to(lowercase__)
for i in range(4):
optimizer.zero_grad()
__UpperCAmelCase : int = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i])
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , timesteps[i]).sample
__UpperCAmelCase : str = torch.nn.functional.mse_loss(lowercase__ , noise[i])
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-5))
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
def binomial_coefficient(n: int, r: int) -> int:
    '''Compute C(n, r) with a single dynamic-programming row of size r + 1.'''
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
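# The inner loop applies Pascal's rule C(i, j) = C(i-1, j) + C(i-1, j-1)
# in place, right to left, so only O(r) memory is needed. For example,
# binomial_coefficient(n=10, r=5) evaluates to 252.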
print(binomial_coefficient(n=10, r=5))
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
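    # For reference: with the default conv_stride of
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product above is
    # 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples
    # (20 ms of 16 kHz audio).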
| 675 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : torch.FloatTensor
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
@register_to_config
def __init__( self , lowercase__ = 3 , lowercase__ = 3 , lowercase__ = ("DownEncoderBlock2D",) , lowercase__ = ("UpDecoderBlock2D",) , lowercase__ = (6_4,) , lowercase__ = 1 , lowercase__ = "silu" , lowercase__ = 3 , lowercase__ = 3_2 , lowercase__ = 2_5_6 , lowercase__ = 3_2 , lowercase__ = None , lowercase__ = 0.1_8_2_1_5 , lowercase__ = "group" , ):
super().__init__()
# pass init params to Encoder
__UpperCAmelCase : List[Any] = Encoder(
in_channels=lowercase__ , out_channels=lowercase__ , down_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , act_fn=lowercase__ , norm_num_groups=lowercase__ , double_z=lowercase__ , )
__UpperCAmelCase : List[Any] = vq_embed_dim if vq_embed_dim is not None else latent_channels
__UpperCAmelCase : Tuple = nn.Convad(lowercase__ , lowercase__ , 1)
__UpperCAmelCase : int = VectorQuantizer(lowercase__ , lowercase__ , beta=0.2_5 , remap=lowercase__ , sane_index_shape=lowercase__)
__UpperCAmelCase : Optional[Any] = nn.Convad(lowercase__ , lowercase__ , 1)
# pass init params to Decoder
__UpperCAmelCase : Union[str, Any] = Decoder(
in_channels=lowercase__ , out_channels=lowercase__ , up_block_types=lowercase__ , block_out_channels=lowercase__ , layers_per_block=lowercase__ , act_fn=lowercase__ , norm_num_groups=lowercase__ , norm_type=lowercase__ , )
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : int = self.encoder(lowercase__)
__UpperCAmelCase : List[str] = self.quant_conv(lowercase__)
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowercase__)
@apply_forward_hook
def A( self , lowercase__ , lowercase__ = False , lowercase__ = True):
# also go through quantization layer
if not force_not_quantize:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = self.quantize(lowercase__)
else:
__UpperCAmelCase : Dict = h
__UpperCAmelCase : str = self.post_quant_conv(lowercase__)
__UpperCAmelCase : List[str] = self.decoder(lowercase__ , quant if self.config.norm_type == '''spatial''' else None)
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__)
def A( self , lowercase__ , lowercase__ = True):
__UpperCAmelCase : List[Any] = sample
__UpperCAmelCase : List[Any] = self.encode(lowercase__).latents
__UpperCAmelCase : Union[str, Any] = self.decode(lowercase__).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowercase__)
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
__UpperCAmelCase : int = {int(lowercase_ ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
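# Illustrative reading of the regex above: a checkpoint name such as
# "mobilenet_v1_0.75_192" parses into a depth multiplier of 0.75 and an
# input resolution of 192x192, which (in the upstream script) feed the
# corresponding config fields alongside the 1001-class label mapping.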
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 1 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''Find a root of ``function`` with the secant method, starting from x0 and x1.'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('''float division by zero, could not find root''')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    '''Example function: x**3 - 2*x - 5.'''
    return math.pow(x, 3) - (2 * x) - 5
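# The demo below applies the secant iteration to f(x) = x**3 - 2*x - 5
# from the starting points 3 and 3.5; the function's only real root is
# approximately 2.0945515, which is the value the iteration converges to.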
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
return self.__class__(**{k: copy.deepcopy(lowercase__) for k, v in self.__dict__.items()})
| 675 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCAmelCase = datasets.logging.get_logger(__name__)
lowerCAmelCase = """\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
"""
lowerCAmelCase = """\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project's README at https://github.com/google-research/bleurt#readme for more information.
"""
lowerCAmelCase = """
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
'scores': List of scores.
Examples:
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> bleurt = datasets.load_metric(\"bleurt\")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results[\"scores\"]])
[1.03, 1.04]
"""
lowerCAmelCase = {
"""bleurt-tiny-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip""",
"""bleurt-tiny-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip""",
"""bleurt-base-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip""",
"""bleurt-base-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip""",
"""bleurt-large-128""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip""",
"""bleurt-large-512""": """https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip""",
"""BLEURT-20-D3""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip""",
"""BLEURT-20-D6""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip""",
"""BLEURT-20-D12""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip""",
"""BLEURT-20""": """https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip""",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def A( self , lowercase__):
# check that config name specifies a valid BLEURT model
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''')
__UpperCAmelCase : Optional[Any] = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
__UpperCAmelCase : Dict = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
__UpperCAmelCase : int = self.config_name.upper()
else:
raise KeyError(
F"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}")
# download the model checkpoint specified by self.config_name and set up the scorer
__UpperCAmelCase : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
__UpperCAmelCase : Optional[int] = score.BleurtScorer(os.path.join(lowercase__ , lowercase__))
def A( self , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[int] = self.scorer.score(references=lowercase__ , candidates=lowercase__)
return {"scores": scores}
| 675 |
def binary_or(a: int, b: int) -> str:  # name reconstructed; the original identifier was mangled
    '''Return the bitwise OR of two non-negative integers as a "0b..." string.'''
    if a < 0 or b < 0:
        raise ValueError('''the value of both inputs must be positive''')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('''1''' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
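# Worked example for the reconstructed binary_or above: binary_or(25, 32)
# compares 011001 with 100000 (both zero-filled to 6 bits), ORs them
# column by column, and returns "0b111001".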
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:  # name reconstructed from the upstream algorithm collection
    '''Return the fixed monthly payment (EMI) for a loan.'''
    if principal <= 0:
        raise Exception('''Principal borrowed must be > 0''')
    if rate_per_annum < 0:
        raise Exception('''Rate of interest must be >= 0''')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('''Years to repay must be an integer > 0''')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
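# Worked example (values approximate): a principal of 25000 at a 12% annual
# rate (rate_per_annum=0.12) over 3 years gives rate_per_month = 0.01 and
# 36 payments, so the EMI is
#   25000 * 0.01 * 1.01**36 / (1.01**36 - 1), i.e. about 830.36 per month.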
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    '''Cycle the key until it is as long as the message.'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    '''Encrypt by shifting each letter back by the matching key letter.'''
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    '''Decrypt by shifting each letter forward by the matching key letter.'''
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    message = '''THE GERMAN ATTACK'''
    key = '''SECRET'''
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=2 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , lowercase__=1_0_0_0 , ):
__UpperCAmelCase : int = parent
__UpperCAmelCase : Optional[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : Any = use_input_mask
__UpperCAmelCase : Tuple = use_token_type_ids
__UpperCAmelCase : Optional[Any] = use_labels
__UpperCAmelCase : Dict = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : int = num_hidden_layers
__UpperCAmelCase : Tuple = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Union[str, Any] = type_sequence_label_size
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Any = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Dict = range_bbox
def A( self):
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
# convert bbox to numpy since TF does not support item assignment
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0]):
for j in range(bbox.shape[1]):
if bbox[i, j, 3] < bbox[i, j, 1]:
__UpperCAmelCase : List[Any] = bbox[i, j, 3]
__UpperCAmelCase : Union[str, Any] = bbox[i, j, 1]
__UpperCAmelCase : str = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__UpperCAmelCase : Dict = bbox[i, j, 2]
__UpperCAmelCase : str = bbox[i, j, 0]
__UpperCAmelCase : int = t
__UpperCAmelCase : Any = tf.convert_to_tensor(lowercase__)
__UpperCAmelCase : Union[str, Any] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : int = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : List[Any] = LayoutLMConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : int = TFLayoutLMModel(config=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : Dict = model(lowercase__ , lowercase__ , token_type_ids=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__ , lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[Any] = TFLayoutLMForMaskedLM(config=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : List[str] = self.num_labels
__UpperCAmelCase : int = TFLayoutLMForSequenceClassification(config=lowercase__)
__UpperCAmelCase : Dict = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = self.num_labels
__UpperCAmelCase : Any = TFLayoutLMForTokenClassification(config=lowercase__)
__UpperCAmelCase : Any = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Dict = TFLayoutLMForQuestionAnswering(config=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__ , lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def A( self):
__UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : int = config_and_inputs
__UpperCAmelCase : str = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
_lowerCAmelCase : Tuple = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Dict = True
_lowerCAmelCase : Optional[Any] = 1_0
def A( self):
__UpperCAmelCase : str = TFLayoutLMModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__)
def A( self):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__)
def A( self):
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__)
@slow
def A( self):
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Any = TFLayoutLMModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
@unittest.skip('''Onnx compliancy broke with TF 2.10''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
    # fmt: off
__UpperCAmelCase : Any = tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231
__UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
__UpperCAmelCase : str = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231
__UpperCAmelCase : Optional[int] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
__UpperCAmelCase : Optional[int] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''')
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = prepare_layoutlm_batch_inputs()
# forward pass
__UpperCAmelCase : str = model(input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
# test the sequence output on [0, :3, :3]
__UpperCAmelCase : str = tf.convert_to_tensor(
[[0.1_7_8_5, -0.1_9_4_7, -0.0_4_2_5], [-0.3_2_5_4, -0.2_8_0_7, 0.2_5_5_3], [-0.5_3_9_1, -0.3_3_2_2, 0.3_3_6_4]] , )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1e-3))
# test the pooled output on [1, :3]
__UpperCAmelCase : int = tf.convert_to_tensor([-0.6_5_8_0, -0.0_2_1_4, 0.8_5_5_2])
self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , lowercase__ , atol=1e-3))
@slow
def A( self):
# initialize model with randomly initialized sequence classification head
__UpperCAmelCase : Tuple = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
__UpperCAmelCase : Dict = model(
input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=tf.convert_to_tensor([1, 1]) , )
# test whether we get a loss as a scalar
__UpperCAmelCase : List[str] = outputs.loss
__UpperCAmelCase : Optional[int] = (2,)
self.assertEqual(loss.shape , lowercase__)
# test the shape of the logits
__UpperCAmelCase : Union[str, Any] = outputs.logits
__UpperCAmelCase : List[str] = (2, 2)
self.assertEqual(logits.shape , lowercase__)
@slow
def A( self):
# initialize model with randomly initialized token classification head
__UpperCAmelCase : int = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=1_3)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = prepare_layoutlm_batch_inputs()
# forward pass
__UpperCAmelCase : Union[str, Any] = model(
input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
# test the shape of the logits
__UpperCAmelCase : Union[str, Any] = outputs.logits
__UpperCAmelCase : int = tf.convert_to_tensor((2, 2_5, 1_3))
self.assertEqual(logits.shape , lowercase__)
@slow
def A( self):
# initialize model with randomly initialized token classification head
__UpperCAmelCase : Union[str, Any] = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''')
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = prepare_layoutlm_batch_inputs()
# forward pass
__UpperCAmelCase : Dict = model(input_ids=lowercase__ , bbox=lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
# test the shape of the logits
__UpperCAmelCase : Dict = tf.convert_to_tensor((2, 2_5))
self.assertEqual(outputs.start_logits.shape , lowercase__)
self.assertEqual(outputs.end_logits.shape , lowercase__)
| 675 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCAmelCase = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
lowerCAmelCase = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
lowerCAmelCase = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Optional[Any]:
'''simple docstring'''
if label_map is not None:
for old_id, new_id in label_map.items():
__UpperCAmelCase : List[str] = new_id
# turn into Numpy arrays
__UpperCAmelCase : Tuple = np.array(lowercase_ )
__UpperCAmelCase : str = np.array(lowercase_ )
if reduce_labels:
__UpperCAmelCase : List[Any] = 255
__UpperCAmelCase : str = label - 1
__UpperCAmelCase : Dict = 255
__UpperCAmelCase : str = label != ignore_index
__UpperCAmelCase : Optional[int] = np.not_equal(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = pred_label[mask]
__UpperCAmelCase : Any = np.array(lowercase_ )[mask]
__UpperCAmelCase : Optional[Any] = pred_label[pred_label == label]
__UpperCAmelCase : Optional[Any] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : Any = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[str] = np.histogram(lowercase_ , bins=lowercase_ , range=(0, num_labels - 1) )[0]
__UpperCAmelCase : List[Any] = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
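# The histogram trick above yields, for every class c, the pixel counts
# intersect_c, union_c, pred_c and label_c in a single pass; downstream,
# IoU_c = intersect_c / union_c and accuracy_c = intersect_c / label_c.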
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = False , ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : List[Any] = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
__UpperCAmelCase : str = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowercase_ , lowercase_ ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = total_intersect_and_union(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# compute metrics
__UpperCAmelCase : Any = {}
__UpperCAmelCase : Union[str, Any] = total_area_intersect.sum() / total_area_label.sum()
__UpperCAmelCase : Optional[Any] = total_area_intersect / total_area_union
__UpperCAmelCase : List[str] = total_area_intersect / total_area_label
__UpperCAmelCase : Optional[int] = np.nanmean(lowercase_ )
__UpperCAmelCase : int = np.nanmean(lowercase_ )
__UpperCAmelCase : List[str] = all_acc
__UpperCAmelCase : Any = iou
__UpperCAmelCase : str = acc
if nan_to_num is not None:
__UpperCAmelCase : Any = {metric: np.nan_to_num(lowercase_ , nan=lowercase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
def A( self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
__UpperCAmelCase : str = mean_iou(
results=lowercase__ , gt_seg_maps=lowercase__ , num_labels=lowercase__ , ignore_index=lowercase__ , nan_to_num=lowercase__ , label_map=lowercase__ , reduce_labels=lowercase__ , )
return iou_result
| 675 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None
    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self):
        return "->".join(str(item) for item in iter(self))
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError('''list index out of range.''')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node
    def delete_front(self):
        return self.delete_nth(0)
    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError('''list index out of range.''')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty(self) -> bool:
        return len(self) == 0
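# Invariant worth noting: tail.next always points back at head, which is
# how __iter__ above detects that it has completed one full lap.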
def test_circular_linked_list() -> None:
    '''Drive the CircularLinkedList through its full public API.'''
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3
    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
lowerCAmelCase = 256
# Modulus to hash a string
lowerCAmelCase = 1_000_003
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> bool:
    '''Return True if `pattern` occurs in `text`, using Rabin-Karp rolling-hash matching.'''
__UpperCAmelCase : List[str] = len(lowercase_ )
__UpperCAmelCase : Tuple = len(lowercase_ )
if p_len > t_len:
return False
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase_ ):
__UpperCAmelCase : List[str] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
__UpperCAmelCase : List[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
__UpperCAmelCase : Any = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
__UpperCAmelCase : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __SCREAMING_SNAKE_CASE ( ) -> None:
    '''Self-test for rabin_karp covering matches, misses, and non-ASCII input.'''
    # Test 1)
    __UpperCAmelCase : Optional[int] = '''abc1abc12'''
__UpperCAmelCase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__UpperCAmelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(lowercase_ , lowercase_ ) and not rabin_karp(lowercase_ , lowercase_ )
# Test 2)
__UpperCAmelCase : Union[str, Any] = '''ABABX'''
__UpperCAmelCase : List[Any] = '''ABABZABABYABABX'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 3)
__UpperCAmelCase : str = '''AAAB'''
__UpperCAmelCase : List[Any] = '''ABAAAAAB'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 4)
__UpperCAmelCase : Optional[Any] = '''abcdabcy'''
__UpperCAmelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(lowercase_ , lowercase_ )
# Test 5)
__UpperCAmelCase : Any = '''Lü'''
__UpperCAmelCase : Optional[int] = '''Lüsai'''
assert rabin_karp(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = '''Lue'''
assert not rabin_karp(lowercase_ , lowercase_ )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
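    # Two direct calls as a quick sketch of the boolean API above
    # (the name follows the document's own references to rabin_karp).
    assert rabin_karp("abra", "abracadabra")
    assert not rabin_karp("abrr", "abracadabra")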
| 675 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : int = StableUnCLIPPipeline
_lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS
_lowerCAmelCase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCAmelCase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
_lowerCAmelCase : Any = False
def A( self):
__UpperCAmelCase : Union[str, Any] = 3_2
__UpperCAmelCase : List[str] = embedder_hidden_size
# prior components
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=lowercase__ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
__UpperCAmelCase : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowercase__ , num_layers=1 , )
torch.manual_seed(0)
__UpperCAmelCase : Any = DDPMScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase__ , clip_sample_range=5.0 , beta_schedule='''squaredcos_cap_v2''' , )
# regular denoising components
torch.manual_seed(0)
__UpperCAmelCase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=lowercase__)
__UpperCAmelCase : Dict = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
torch.manual_seed(0)
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCAmelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase__ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ))
torch.manual_seed(0)
__UpperCAmelCase : Tuple = UNetaDConditionModel(
sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase__ , layers_per_block=1 , upcast_attention=lowercase__ , use_linear_projection=lowercase__ , )
torch.manual_seed(0)
__UpperCAmelCase : List[str] = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=lowercase__ , steps_offset=1 , )
torch.manual_seed(0)
__UpperCAmelCase : Dict = AutoencoderKL()
__UpperCAmelCase : List[Any] = {
# prior components
'''prior_tokenizer''': prior_tokenizer,
'''prior_text_encoder''': prior_text_encoder,
'''prior''': prior,
'''prior_scheduler''': prior_scheduler,
# image noising components
'''image_normalizer''': image_normalizer,
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder,
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
}
return components
def A( self , lowercase__ , lowercase__=0):
if str(lowercase__).startswith('''mps'''):
__UpperCAmelCase : List[Any] = torch.manual_seed(lowercase__)
else:
__UpperCAmelCase : int = torch.Generator(device=lowercase__).manual_seed(lowercase__)
__UpperCAmelCase : Dict = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''prior_num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def A( self):
__UpperCAmelCase : Optional[Any] = torch_device == '''cpu'''
self._test_attention_slicing_forward_pass(test_max_difference=lowercase__)
def A( self):
__UpperCAmelCase : Any = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=lowercase__)
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
def A( self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A( self):
__UpperCAmelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy''')
__UpperCAmelCase : int = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa)
pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Dict = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCAmelCase : Tuple = pipe('''anime turle''' , generator=lowercase__ , output_type='''np''')
__UpperCAmelCase : str = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(lowercase__ , lowercase__)
def A( self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase : Any = StableUnCLIPPipeline.from_pretrained('''fusing/stable-unclip-2-1-l''' , torch_dtype=torch.floataa)
__UpperCAmelCase : Tuple = pipe.to(lowercase__)
pipe.set_progress_bar_config(disable=lowercase__)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : List[str] = pipe(
'''anime turtle''' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 1_0**9
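# Hedged sketch of the pipeline exercised above, mirroring the integration test
# (model id and memory-saving calls are taken from the test code, not verified here):
# import torch
# from diffusers import StableUnCLIPPipeline
# pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
# pipe.enable_attention_slicing()
# pipe.enable_sequential_cpu_offload()
# image = pipe("anime turtle", num_inference_steps=20, output_type="np").images[0]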
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> list:
    '''Return the first n_element Hamming numbers, i.e. numbers of the form 2^i * 3^j * 5^k.'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
if n_element < 1:
        __UpperCAmelCase : str = ValueError('''n_element should be a positive number''' )
raise my_error
__UpperCAmelCase : Any = [1]
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = (0, 0, 0)
__UpperCAmelCase : int = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
lowerCAmelCase = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
lowerCAmelCase = hamming(int(n))
print("""-----------------------------------------------------""")
print(F'The list with nth numbers is: {hamming_numbers}')
print("""-----------------------------------------------------""")
| 675 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Any = IFInpaintingPipeline
_lowerCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
_lowerCAmelCase : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
def A( self):
return self._get_dummy_components()
def A( self , lowercase__ , lowercase__=0):
if str(lowercase__).startswith('''mps'''):
__UpperCAmelCase : List[Any] = torch.manual_seed(lowercase__)
else:
__UpperCAmelCase : List[str] = torch.Generator(device=lowercase__).manual_seed(lowercase__)
__UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase__)).to(lowercase__)
__UpperCAmelCase : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase__)).to(lowercase__)
__UpperCAmelCase : str = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def A( self):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
def A( self):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''')
def A( self):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1)
def A( self):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
def A( self):
self._test_save_load_local()
def A( self):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 675 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
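# Usage sketch (assuming the class above is transformers' RealmConfig; defaults
# mirror the pretrained checkpoints and individual fields can be overridden):
# config = RealmConfig(num_candidates=4)
# assert config.retriever_proj_size == 128  # default from the signature above
# assert config.searcher_beam_size == 5000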
| 675 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]:
    '''Resolve an image processor class from its class name, searching the known model modules.'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCAmelCase : Optional[Any] = model_type_to_module_name(lowercase_ )
__UpperCAmelCase : Tuple = importlib.import_module(f".{module_name}" , '''transformers.models''' )
try:
return getattr(lowercase_ , lowercase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowercase_ , '''__name__''' , lowercase_ ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCAmelCase : Tuple = importlib.import_module('''transformers''' )
if hasattr(lowercase_ , lowercase_ ):
return getattr(lowercase_ , lowercase_ )
return None
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , **lowercase_ , ) -> Dict:
    '''Load the image processor configuration dict for a model from the Hub or a local path.'''
__UpperCAmelCase : Union[str, Any] = get_file_from_repo(
lowercase_ , lowercase_ , cache_dir=lowercase_ , force_download=lowercase_ , resume_download=lowercase_ , proxies=lowercase_ , use_auth_token=lowercase_ , revision=lowercase_ , local_files_only=lowercase_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(lowercase_ , encoding='''utf-8''' ) as reader:
return json.load(lowercase_ )
class lowerCamelCase :
def __init__( self):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''')
@classmethod
@replace_list_option_in_docstrings(lowercase__)
def A( cls , lowercase__ , **lowercase__):
__UpperCAmelCase : Optional[int] = kwargs.pop('''config''' , lowercase__)
__UpperCAmelCase : List[str] = kwargs.pop('''trust_remote_code''' , lowercase__)
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase , __UpperCAmelCase : int = ImageProcessingMixin.get_image_processor_dict(lowercase__ , **lowercase__)
__UpperCAmelCase : Optional[Any] = config_dict.get('''image_processor_type''' , lowercase__)
__UpperCAmelCase : List[str] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {}):
__UpperCAmelCase : str = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__UpperCAmelCase : Any = config_dict.pop('''feature_extractor_type''' , lowercase__)
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''')
__UpperCAmelCase : List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''')
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {}):
__UpperCAmelCase : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
__UpperCAmelCase : Optional[int] = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''')
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''')
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ , **lowercase__)
# It could be in `config.image_processor_type``
__UpperCAmelCase : str = getattr(lowercase__ , '''image_processor_type''' , lowercase__)
if hasattr(lowercase__ , '''auto_map''') and "AutoImageProcessor" in config.auto_map:
__UpperCAmelCase : int = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
__UpperCAmelCase : str = image_processor_class_from_name(lowercase__)
__UpperCAmelCase : Union[str, Any] = image_processor_auto_map is not None
__UpperCAmelCase : List[str] = image_processor_class is not None or type(lowercase__) in IMAGE_PROCESSOR_MAPPING
__UpperCAmelCase : Optional[int] = resolve_trust_remote_code(
lowercase__ , lowercase__ , lowercase__ , lowercase__)
if has_remote_code and trust_remote_code:
__UpperCAmelCase : List[str] = get_class_from_dynamic_module(
lowercase__ , lowercase__ , **lowercase__)
__UpperCAmelCase : List[str] = kwargs.pop('''code_revision''' , lowercase__)
if os.path.isdir(lowercase__):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowercase__ , **lowercase__)
elif image_processor_class is not None:
return image_processor_class.from_dict(lowercase__ , **lowercase__)
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowercase__) in IMAGE_PROCESSOR_MAPPING:
__UpperCAmelCase : Optional[int] = IMAGE_PROCESSOR_MAPPING[type(lowercase__)]
return image_processor_class.from_dict(lowercase__ , **lowercase__)
raise ValueError(
F"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
F"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
F"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
@staticmethod
def A( lowercase__ , lowercase__):
IMAGE_PROCESSOR_MAPPING.register(lowercase__ , lowercase__)
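# Usage sketch for the resolver above (Hub access assumed at call time):
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# Per the mapping above, a `vit`-type config resolves to ViTImageProcessor.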
| 675 |
import pytest
import datasets
# Import fixture modules as plugins
lowerCAmelCase = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
    '''Mark tests that carry neither an `integration` nor a `unit` marker as unit tests.'''
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
    '''Register the custom `torchaudio_latest` pytest marker.'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Dict:
    '''Redirect the datasets cache directories into a per-test temporary path.'''
__UpperCAmelCase : Dict = tmp_path_factory.getbasetemp() / '''cache'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''datasets'''
__UpperCAmelCase : Union[str, Any] = test_hf_cache_home / '''metrics'''
__UpperCAmelCase : List[Any] = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(lowercase_ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(lowercase_ ) )
__UpperCAmelCase : Any = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(lowercase_ ) )
__UpperCAmelCase : List[Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowercase_ ) )
@pytest.fixture(autouse=lowercase_ , scope='''session''' )
def __SCREAMING_SNAKE_CASE ( ) -> str:
    '''Disable progress bars for the whole test session.'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
    '''Disable download-count reporting during tests.'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , lowercase_ )
@pytest.fixture
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]:
    '''Silence SQLAlchemy's uber deprecation warning.'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , lowercase_ )
| 675 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
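# Note on the lazy pattern above: the names in `_import_structure` are exposed
# immediately, but the heavy submodule import is deferred until first attribute
# access (hedged sketch):
# import transformers
# cls = transformers.Pix2StructImageProcessor  # the deferred import happens here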
| 675 |
def __SCREAMING_SNAKE_CASE ( ) -> list[list[int]]:
    '''Build a 1000 x 1000 grid whose rows and columns are sorted in decreasing order.'''
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
lowerCAmelCase = generate_large_matrix()
lowerCAmelCase = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> None:
    '''Assert that every row and every column of the grid is sorted in decreasing order.'''
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    '''Binary-search a decreasingly sorted row for the index of its first negative value.'''
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : List[Any] = len(lowercase_ ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
__UpperCAmelCase : List[Any] = (left + right) // 2
__UpperCAmelCase : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
__UpperCAmelCase : Dict = mid + 1
else:
__UpperCAmelCase : Optional[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    '''Count negatives with a per-row binary search, shrinking the bound as rows descend.'''
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = len(grid[0] )
for i in range(len(lowercase_ ) ):
__UpperCAmelCase : Any = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    '''Count negatives by scanning every element (brute force).'''
return len([number for row in grid for number in row if number < 0] )
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    '''Count negatives by scanning each row and stopping at its first negative.'''
__UpperCAmelCase : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def __SCREAMING_SNAKE_CASE ( ) -> None:
    '''Time the three counting strategies against the large grid.'''
from timeit import timeit
print('''Running benchmarks''' )
__UpperCAmelCase : Tuple = (
'''from __main__ import count_negatives_binary_search, '''
'''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
__UpperCAmelCase : Union[str, Any] = timeit(f"{func}(grid=grid)" , setup=lowercase_ , number=500 )
print(f"{func}() took {time:0.4f} seconds" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
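    # Cross-check sketch: the three counters agree on the small fixtures above.
    assert count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8
    assert count_negatives_brute_force([[3, 2], [1, 0]]) == 0
    assert count_negatives_brute_force_with_break([[7, 7, 6], [-1, -2, -3]]) == 3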
| 675 | 1 |
from ...configuration_utils import PretrainedConfig
lowerCAmelCase = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''tapas'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1_0_2_4 , lowercase__=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__=1_0.0 , lowercase__=0 , lowercase__=1.0 , lowercase__=None , lowercase__=1.0 , lowercase__=False , lowercase__=None , lowercase__=1.0 , lowercase__=1.0 , lowercase__=False , lowercase__=False , lowercase__="ratio" , lowercase__=None , lowercase__=None , lowercase__=6_4 , lowercase__=3_2 , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__=None , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Tuple = num_hidden_layers
__UpperCAmelCase : str = num_attention_heads
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : List[Any] = intermediate_size
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_sizes
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
# Fine-tuning task hyperparameters
__UpperCAmelCase : Union[str, Any] = positive_label_weight
__UpperCAmelCase : int = num_aggregation_labels
__UpperCAmelCase : str = aggregation_loss_weight
__UpperCAmelCase : Dict = use_answer_as_supervision
__UpperCAmelCase : Any = answer_loss_importance
__UpperCAmelCase : Union[str, Any] = use_normalized_answer_loss
__UpperCAmelCase : int = huber_loss_delta
__UpperCAmelCase : Optional[int] = temperature
__UpperCAmelCase : List[Any] = aggregation_temperature
__UpperCAmelCase : List[str] = use_gumbel_for_cells
__UpperCAmelCase : List[str] = use_gumbel_for_aggregation
__UpperCAmelCase : List[str] = average_approximation_function
__UpperCAmelCase : Union[str, Any] = cell_selection_preference
__UpperCAmelCase : List[str] = answer_loss_cutoff
__UpperCAmelCase : str = max_num_rows
__UpperCAmelCase : int = max_num_columns
__UpperCAmelCase : Tuple = average_logits_per_cell
__UpperCAmelCase : Union[str, Any] = select_one_column
__UpperCAmelCase : List[str] = allow_empty_column_selection
__UpperCAmelCase : Optional[int] = init_cell_selection_weights_to_zero
__UpperCAmelCase : Any = reset_position_index_per_cell
__UpperCAmelCase : Optional[Any] = disable_per_token_loss
# Aggregation hyperparameters
__UpperCAmelCase : Optional[Any] = aggregation_labels
__UpperCAmelCase : int = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowercase__):
__UpperCAmelCase : Optional[Any] = {int(lowercase__): v for k, v in aggregation_labels.items()}
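# Usage sketch (assuming the class above is transformers' TapasConfig): a
# weakly supervised aggregation setup overrides two fields, the rest keep the
# defaults visible in the signature above.
# config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)
# assert config.max_num_rows == 64 and config.max_num_columns == 32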
| 675 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCAmelCase = {"""tokenization_tapex""": ["""TapexTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 675 | 1 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = BertJapaneseTokenizer
_lowerCAmelCase : Tuple = False
_lowerCAmelCase : Union[str, Any] = True
def A( self):
super().setUp()
__UpperCAmelCase : Tuple = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''こんにちは''',
'''こん''',
'''にちは''',
'''ばんは''',
'''##こん''',
'''##にちは''',
'''##ばんは''',
'''世界''',
'''##世界''',
'''、''',
'''##、''',
'''。''',
'''##。''',
]
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def A( self , lowercase__):
__UpperCAmelCase : int = '''こんにちは、世界。 \nこんばんは、世界。'''
__UpperCAmelCase : Union[str, Any] = '''こんにちは 、 世界 。 こんばんは 、 世界 。'''
return input_text, output_text
def A( self , lowercase__):
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.get_input_output_texts(lowercase__)
__UpperCAmelCase : Union[str, Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__)
__UpperCAmelCase : List[Any] = tokenizer.decode(lowercase__ , clean_up_tokenization_spaces=lowercase__)
return text, ids
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
__UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file)
__UpperCAmelCase : Tuple = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''')
self.assertListEqual(lowercase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def A( self):
__UpperCAmelCase : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''')
self.assertIsNotNone(lowercase__)
__UpperCAmelCase : List[Any] = '''こんにちは、世界。\nこんばんは、世界。'''
__UpperCAmelCase : Dict = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowercase__ , '''wb''') as handle:
pickle.dump(lowercase__ , lowercase__)
with open(lowercase__ , '''rb''') as handle:
__UpperCAmelCase : int = pickle.load(lowercase__)
__UpperCAmelCase : int = tokenizer_new.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Any = MecabTokenizer(mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A( self):
try:
__UpperCAmelCase : List[Any] = MecabTokenizer(mecab_dic='''unidic_lite''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A( self):
try:
__UpperCAmelCase : Optional[int] = MecabTokenizer(mecab_dic='''unidic''')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A( self):
__UpperCAmelCase : Optional[Any] = MecabTokenizer(do_lower_case=lowercase__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
def A( self):
try:
__UpperCAmelCase : int = MecabTokenizer(
do_lower_case=lowercase__ , normalize_text=lowercase__ , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''')
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
def A( self):
__UpperCAmelCase : Optional[Any] = MecabTokenizer(normalize_text=lowercase__ , mecab_dic='''ipadic''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , )
@require_sudachi
def A( self):
__UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''')
self.assertIsNotNone(lowercase__)
__UpperCAmelCase : Any = '''こんにちは、世界。\nこんばんは、世界。'''
__UpperCAmelCase : List[str] = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowercase__ , '''wb''') as handle:
pickle.dump(lowercase__ , lowercase__)
with open(lowercase__ , '''rb''') as handle:
__UpperCAmelCase : Any = pickle.load(lowercase__)
__UpperCAmelCase : int = tokenizer_new.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
@require_sudachi
def A( self):
__UpperCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A( self):
__UpperCAmelCase : List[Any] = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国''', '''人''', '''参政''', '''権'''])
@require_sudachi
def A( self):
__UpperCAmelCase : Any = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人''', '''参政権'''])
@require_sudachi
def A( self):
__UpperCAmelCase : Any = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''')
self.assertListEqual(tokenizer.tokenize('''外国人参政権''') , ['''外国人参政権'''])
@require_sudachi
def A( self):
__UpperCAmelCase : str = SudachiTokenizer(do_lower_case=lowercase__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A( self):
__UpperCAmelCase : Optional[int] = SudachiTokenizer(normalize_text=lowercase__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , )
@require_sudachi
def A( self):
__UpperCAmelCase : List[Any] = SudachiTokenizer(trim_whitespace=lowercase__ , sudachi_dict_type='''core''')
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , )
@require_jumanpp
def A( self):
__UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''')
self.assertIsNotNone(lowercase__)
__UpperCAmelCase : Union[str, Any] = '''こんにちは、世界。\nこんばんは、世界。'''
__UpperCAmelCase : List[Any] = tokenizer.tokenize(lowercase__)
self.assertListEqual(lowercase__ , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
__UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , '''tokenizer.bin''')
with open(lowercase__ , '''wb''') as handle:
pickle.dump(lowercase__ , lowercase__)
with open(lowercase__ , '''rb''') as handle:
__UpperCAmelCase : Union[str, Any] = pickle.load(lowercase__)
__UpperCAmelCase : Tuple = tokenizer_new.tokenize(lowercase__)
self.assertListEqual(lowercase__ , lowercase__)
@require_jumanpp
def A( self):
__UpperCAmelCase : List[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A( self):
__UpperCAmelCase : Tuple = JumanppTokenizer(do_lower_case=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A( self):
__UpperCAmelCase : List[Any] = JumanppTokenizer(normalize_text=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , )
@require_jumanpp
def A( self):
__UpperCAmelCase : Optional[Any] = JumanppTokenizer(trim_whitespace=lowercase__)
self.assertListEqual(
tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''') , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , )
@require_jumanpp
def A( self):
__UpperCAmelCase : Optional[int] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''') , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , )
def A( self):
__UpperCAmelCase : Dict = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''']
__UpperCAmelCase : Optional[Any] = {}
for i, token in enumerate(lowercase__):
__UpperCAmelCase : int = i
__UpperCAmelCase : Tuple = WordpieceTokenizer(vocab=lowercase__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こんにちは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは''') , ['''こん''', '''##ばんは'''])
self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''') , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''])
def A( self):
__UpperCAmelCase : Dict = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''')
__UpperCAmelCase : str = tokenizer.subword_tokenizer
__UpperCAmelCase : Optional[int] = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''')
self.assertListEqual(lowercase__ , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''])
__UpperCAmelCase : Tuple = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''')
self.assertListEqual(lowercase__ , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''])
def A( self):
__UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''')
__UpperCAmelCase : List[Any] = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowercase__)
__UpperCAmelCase : Union[str, Any] = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowercase__)
__UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowercase__)
__UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : List[str] = BertJapaneseTokenizer
_lowerCAmelCase : Tuple = False
def A( self):
super().setUp()
__UpperCAmelCase : Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
__UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
def A( self , **lowercase__):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **lowercase__)
def A( self , lowercase__):
__UpperCAmelCase : Tuple = '''こんにちは、世界。 \nこんばんは、世界。'''
__UpperCAmelCase : Any = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'''
return input_text, output_text
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
pass # TODO add if relevant
def A( self):
__UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''')
__UpperCAmelCase : Optional[int] = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''')
self.assertListEqual(
lowercase__ , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def A( self):
__UpperCAmelCase : str = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。''']
__UpperCAmelCase : Dict = {}
for i, token in enumerate(lowercase__):
__UpperCAmelCase : int = i
__UpperCAmelCase : Tuple = CharacterTokenizer(vocab=lowercase__ , unk_token='''[UNK]''')
self.assertListEqual(tokenizer.tokenize('''''') , [])
self.assertListEqual(tokenizer.tokenize('''こんにちは''') , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''])
self.assertListEqual(tokenizer.tokenize('''こんにちほ''') , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''])
def A( self):
__UpperCAmelCase : str = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''')
__UpperCAmelCase : Optional[int] = tokenizer.encode('''ありがとう。''' , add_special_tokens=lowercase__)
__UpperCAmelCase : int = tokenizer.encode('''どういたしまして。''' , add_special_tokens=lowercase__)
__UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase__)
__UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : str = '''cl-tohoku/bert-base-japanese'''
__UpperCAmelCase : str = AutoTokenizer.from_pretrained(lowercase__)
self.assertIsInstance(lowercase__ , lowercase__)
class lowerCamelCase ( unittest.TestCase ):
def A( self):
__UpperCAmelCase : Tuple = '''cl-tohoku/bert-base-japanese'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertTokenizer.from_pretrained(lowercase__)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
__UpperCAmelCase : str = '''bert-base-cased'''
with self.assertLogs('''transformers''' , level='''WARNING''') as cm:
BertJapaneseTokenizer.from_pretrained(lowercase__)
self.assertTrue(
cm.records[0].message.startswith(
'''The tokenizer class you load from this checkpoint is not the same type as the class this function'''
''' is called from.'''))
| 675 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , lowercase__ , lowercase__=1_3 , lowercase__=7 , lowercase__=True , lowercase__=True , lowercase__=False , lowercase__=True , lowercase__=9_9 , lowercase__=3_2 , lowercase__=5 , lowercase__=4 , lowercase__=3_7 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=1_6 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=3 , lowercase__=4 , lowercase__=None , ):
__UpperCAmelCase : Tuple = parent
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Union[str, Any] = vocab_size
__UpperCAmelCase : Optional[int] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[str] = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[str] = num_labels
__UpperCAmelCase : Dict = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def A( self):
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase : Dict = None
if self.use_input_mask:
__UpperCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
__UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A( self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Union[str, Any] = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__)
__UpperCAmelCase : List[Any] = model(lowercase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ):
__UpperCAmelCase : Optional[Any] = BioGptForCausalLM(config=lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : str = BioGptModel(config=lowercase__)
model.to(lowercase__)
model.eval()
# create attention mask
__UpperCAmelCase : str = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
__UpperCAmelCase : int = self.seq_length // 2
__UpperCAmelCase : Any = 0
# first forward pass
        __UpperCAmelCase , __UpperCAmelCase = model(lowercase__ , attention_mask=lowercase__).to_tuple()
        # create a hypothetical next token and extend next_input_ids
__UpperCAmelCase : Union[str, Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
__UpperCAmelCase : Tuple = ids_tensor((1,) , lowercase__).item() + 1
__UpperCAmelCase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
__UpperCAmelCase : int = random_other_next_tokens
# append to next input_ids and attn_mask
__UpperCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : int = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowercase__)] , dim=1 , )
# get two different outputs
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : List[Any] = model(lowercase__ , past_key_values=lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
# select random slice
__UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
__UpperCAmelCase : int = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : int = BioGptModel(config=lowercase__).to(lowercase__).eval()
__UpperCAmelCase : List[str] = torch.ones(input_ids.shape , dtype=torch.long , device=lowercase__)
# first forward pass
__UpperCAmelCase : Union[str, Any] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__)
        __UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend next_input_ids
__UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and attn_mask
__UpperCAmelCase : Any = torch.cat([input_ids, next_tokens] , dim=-1)
__UpperCAmelCase : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1)
__UpperCAmelCase : List[Any] = model(lowercase__ , attention_mask=lowercase__)['''last_hidden_state''']
__UpperCAmelCase : int = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__)[
'''last_hidden_state'''
]
# select random slice
__UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
__UpperCAmelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCAmelCase : Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=1e-3))
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__ , lowercase__=False):
__UpperCAmelCase : int = BioGptForCausalLM(lowercase__)
model.to(lowercase__)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__UpperCAmelCase : Tuple = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def A( self , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[int] = BioGptModel(lowercase__)
__UpperCAmelCase : int = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
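        # Hedged reading of the check below (not an upstream guarantee): this mirrors
        # GPT-2-style scaled initialization, where residual output projections
        # ("c_proj") are expected to have std = initializer_range / sqrt(2 * num_hidden_layers);
        # with this tester's defaults (initializer_range=0.02, 5 layers) that is
        # roughly 0.0063.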
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , *lowercase__):
__UpperCAmelCase : Optional[Any] = self.num_labels
__UpperCAmelCase : List[str] = BioGptForTokenClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : List[str] = model(lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def A( self):
__UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        (
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            __UpperCAmelCase ,
        ) = config_and_inputs
__UpperCAmelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : str = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_lowerCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_lowerCAmelCase : Union[str, Any] = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[Any] = False
def A( self):
__UpperCAmelCase : int = BioGptModelTester(self)
__UpperCAmelCase : int = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7)
def A( self):
self.config_tester.run_common_tests()
def A( self):
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowercase__ , gradient_checkpointing=lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowercase__)
def A( self):
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowercase__)
@slow
def A( self):
__UpperCAmelCase : Any = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
__UpperCAmelCase : Dict = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : List[str] = '''left'''
        # Define PAD Token = EOS Token
__UpperCAmelCase : List[Any] = tokenizer.eos_token
__UpperCAmelCase : Tuple = model.config.eos_token_id
# use different length sentences to test batching
__UpperCAmelCase : Optional[Any] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__UpperCAmelCase : int = tokenizer(lowercase__ , return_tensors='''pt''' , padding=lowercase__)
__UpperCAmelCase : Union[str, Any] = inputs['''input_ids'''].to(lowercase__)
__UpperCAmelCase : int = model.generate(
input_ids=lowercase__ , attention_mask=inputs['''attention_mask'''].to(lowercase__) , )
__UpperCAmelCase : Any = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Optional[int] = model.generate(input_ids=lowercase__)
__UpperCAmelCase : Optional[int] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__UpperCAmelCase : str = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(lowercase__)
__UpperCAmelCase : Any = model.generate(input_ids=lowercase__ , max_length=model.config.max_length - num_paddings)
__UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : Any = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : str = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(lowercase__ , lowercase__)
self.assertListEqual(lowercase__ , [non_padded_sentence, padded_sentence])
@slow
def A( self):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Union[str, Any] = BioGptModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def A( self):
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Dict = 3
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : int = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
__UpperCAmelCase : Any = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def A( self):
        __UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : List[Any] = 3
__UpperCAmelCase : Union[str, Any] = '''multi_label_classification'''
__UpperCAmelCase : List[Any] = input_dict['''input_ids''']
__UpperCAmelCase : Tuple = input_ids.ne(1).to(lowercase__)
__UpperCAmelCase : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
__UpperCAmelCase : List[Any] = BioGptForSequenceClassification(lowercase__)
model.to(lowercase__)
model.eval()
__UpperCAmelCase : Optional[Any] = model(lowercase__ , attention_mask=lowercase__ , labels=lowercase__)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : Optional[int] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]])
__UpperCAmelCase : int = model(lowercase__)[0]
__UpperCAmelCase : Any = 4_2_3_8_4
__UpperCAmelCase : Tuple = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowercase__)
__UpperCAmelCase : Dict = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase__ , atol=1e-4))
@slow
def A( self):
__UpperCAmelCase : Union[str, Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
__UpperCAmelCase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(lowercase__)
torch.manual_seed(0)
__UpperCAmelCase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(lowercase__)
__UpperCAmelCase : List[str] = model.generate(
**lowercase__ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=lowercase__ , )
__UpperCAmelCase : List[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase__)
__UpperCAmelCase : int = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(lowercase__ , lowercase__)
| 675 | 1 |
import os
def __SCREAMING_SNAKE_CASE ( lowercase_ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(lowercase_ ) , lowercase_ ) ) as in_file:
__UpperCAmelCase : Any = in_file.read()
__UpperCAmelCase : Tuple = [[int(lowercase_ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
    __UpperCAmelCase : Dict = len(grid[0] )
    __UpperCAmelCase : Optional[int] = [[0 for i in range(lowercase_ )] for j in range(lowercase_ )]
__UpperCAmelCase : Any = grid[0][0]
for i in range(1 , lowercase_ ):
__UpperCAmelCase : str = grid[0][i] + dp[0][i - 1]
for i in range(1 , lowercase_ ):
__UpperCAmelCase : Dict = grid[i][0] + dp[i - 1][0]
for i in range(1 , lowercase_ ):
for j in range(1 , lowercase_ ):
__UpperCAmelCase : Union[str, Any] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
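# Hedged worked example (hypothetical 3x3 grid, not the matrix.txt this script reads):
# for grid = [[1, 3, 1],
#             [1, 5, 1],
#             [4, 2, 1]]
# the DP table fills to [[1, 4, 5], [2, 7, 6], [6, 8, 7]], so the cheapest
# top-left -> bottom-right path (moving only right or down) costs 7,
# via 1 -> 3 -> 1 -> 1 -> 1.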
if __name__ == "__main__":
print(F'{solution() = }')
| 675 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
"""bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
),
"""bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
"""bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
"""cl-tohoku/bert-base-japanese-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
),
"""cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
"""https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
),
"""wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : int = '''bert'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=0 , lowercase__="absolute" , lowercase__=True , lowercase__=None , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , **lowercase__)
__UpperCAmelCase : Tuple = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Dict = num_attention_heads
__UpperCAmelCase : Tuple = hidden_act
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : List[Any] = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = type_vocab_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : List[str] = position_embedding_type
__UpperCAmelCase : Optional[Any] = use_cache
__UpperCAmelCase : List[Any] = classifier_dropout
class lowerCamelCase ( _UpperCamelCase ):
@property
def A( self):
if self.task == "multiple-choice":
__UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__UpperCAmelCase : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
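# Hedged note on the ONNX config above: declaring the batch (and, for multiple-choice,
# the choice) and sequence axes as dynamic lets one exported graph accept varying
# batch sizes and sequence lengths instead of baking fixed shapes into the export.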
| 675 | 1 |
from __future__ import annotations
from math import pow, sqrt
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase_ , 2 ) - pow(lowercase_ , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase_ , 2 ) + pow(lowercase_ , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
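# Hedged worked examples (a 3-4-5 triple chosen for illustration): calling with
# resistance=3, reactance=4, impedance=0 returns {'impedance': 5.0} since
# sqrt(3**2 + 4**2) = 5, while resistance=0, reactance=4, impedance=5 recovers
# {'resistance': 3.0} from sqrt(5**2 - 4**2).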
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = int(lowercase_ )
assert noofclusters < len(lowercase_ )
# Find out the dimensionality
__UpperCAmelCase : str = len(vectors[0] )
# Will help select random centroids from among the available vectors
__UpperCAmelCase : Union[str, Any] = list(range(len(lowercase_ ) ) )
shuffle(lowercase_ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__UpperCAmelCase : Union[str, Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__UpperCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First let's ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__UpperCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase_ )
]
##These nodes will assign the centroid Variables the appropriate
##values
__UpperCAmelCase : str = tf.placeholder('''float64''' , [dim] )
__UpperCAmelCase : Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__UpperCAmelCase : Union[str, Any] = [tf.Variable(0 ) for i in range(len(lowercase_ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__UpperCAmelCase : Dict = tf.placeholder('''int32''' )
__UpperCAmelCase : Optional[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase_ , lowercase_ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__UpperCAmelCase : Any = tf.reduce_mean(lowercase_ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__UpperCAmelCase : Tuple = tf.placeholder('''float''' , [dim] )
__UpperCAmelCase : Any = tf.placeholder('''float''' , [dim] )
        __UpperCAmelCase : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(lowercase_ , lowercase_ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__UpperCAmelCase : Union[str, Any] = tf.placeholder('''float''' , [noofclusters] )
__UpperCAmelCase : Optional[Any] = tf.argmin(lowercase_ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__UpperCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase_ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__UpperCAmelCase : Union[str, Any] = 100
for _ in range(lowercase_ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase_ ) ):
__UpperCAmelCase : List[str] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__UpperCAmelCase : List[Any] = [
sess.run(lowercase_ , feed_dict={va: vect, va: sess.run(lowercase_ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__UpperCAmelCase : Optional[Any] = sess.run(
lowercase_ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase_ ):
# Collect all the vectors assigned to this cluster
__UpperCAmelCase : Optional[Any] = [
vectors[i]
for i in range(len(lowercase_ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__UpperCAmelCase : str = sess.run(
lowercase_ , feed_dict={mean_input: array(lowercase_ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__UpperCAmelCase : List[str] = sess.run(lowercase_ )
__UpperCAmelCase : Tuple = sess.run(lowercase_ )
return centroids, assignments
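# Hedged usage sketch (assumes a TF 1.x graph-mode install, since the script leans on
# tf.placeholder / tf.Session-era APIs, and uses this file's obfuscated function name):
#     vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
#     centroids, assignments = __SCREAMING_SNAKE_CASE(vectors, 2)
# would yield two centroid locations and, for each input vector, the index of the
# cluster it was assigned to after the fixed 100 EM-style iterations.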
| 675 | 1 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=_UpperCamelCase ):
_lowerCAmelCase : List[str] = ['''onnx''']
def __init__( self , *lowercase__ , **lowercase__):
requires_backends(self , ['''onnx'''])
@classmethod
def A( cls , *lowercase__ , **lowercase__):
requires_backends(cls , ['''onnx'''])
@classmethod
def A( cls , *lowercase__ , **lowercase__):
requires_backends(cls , ['''onnx'''])
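# Hedged note on the pattern above: these dummies stand in for the real ONNX-backed
# classes when the "onnx" extra is not installed, so importing the package still
# succeeds and a clear requires_backends error surfaces only when the class is
# actually constructed or one of its classmethods is called.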
| 675 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
'''simple docstring'''
if not nums:
return 0
__UpperCAmelCase : int = nums[0]
__UpperCAmelCase : Optional[Any] = 0
for num in nums[1:]:
        __UpperCAmelCase , __UpperCAmelCase = (
max_excluding + num,
max(lowercase_ , lowercase_ ),
)
return max(lowercase_ , lowercase_ )
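# Hedged trace (hypothetical input): for nums = [2, 7, 9, 3, 1] the running pair
# (max_including, max_excluding) evolves (2, 0) -> (7, 2) -> (11, 7) -> (10, 11)
# -> (12, 11), so the best sum over non-adjacent elements is 12 (2 + 9 + 1).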
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
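# Hedged walkthrough of how MAPPING is applied (the substitution happens in the
# loading helper further below; the concrete weight name is illustrative): a fairseq
# parameter such as
#     encoder.layers.3.self_attn.k_proj.weight
# matches the "self_attn.k_proj" entry, has the "*" in its target replaced by the
# parsed layer index 3, and is prefixed with "wav2vec2." because the target is not
# in the top-level list below, landing at
#     wav2vec2.encoder.layers.3.attention.k_proj.weight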
lowerCAmelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = {}
with open(lowercase_ , '''r''' ) as file:
for line_number, line in enumerate(lowercase_ ):
__UpperCAmelCase : Any = line.strip()
if line:
__UpperCAmelCase : Optional[int] = line.split()
__UpperCAmelCase : Union[str, Any] = line_number
__UpperCAmelCase : str = words[0]
__UpperCAmelCase : int = value
return result
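# Hedged illustration (hypothetical file contents, not shipped with this script):
# a labels file whose lines read
#     dog
#     cat
# parses into {0: 'dog', 1: 'cat'}: the line number becomes the class id and the
# first whitespace-separated token on each non-empty line becomes the label.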
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
__UpperCAmelCase : Any = getattr(lowercase_ , lowercase_ )
__UpperCAmelCase : Optional[Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
__UpperCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__UpperCAmelCase : Optional[Any] = '''param'''
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Optional[int] = getattr(lowercase_ , lowercase_ ).shape
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : List[Any] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__UpperCAmelCase : Tuple = getattr(lowercase_ , lowercase_ )
__UpperCAmelCase : List[Any] = shape_pointer.shape
# let's reduce dimension
__UpperCAmelCase : List[str] = value[0]
else:
__UpperCAmelCase : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
__UpperCAmelCase : int = value
elif weight_type == "weight_g":
__UpperCAmelCase : Any = value
elif weight_type == "weight_v":
__UpperCAmelCase : Dict = value
elif weight_type == "bias":
__UpperCAmelCase : List[Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__UpperCAmelCase : Optional[Any] = getattr(lowercase_ , lowercase_ )
__UpperCAmelCase : str = value
else:
__UpperCAmelCase : str = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase_ ):
__UpperCAmelCase : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__UpperCAmelCase : List[Any] = '''param'''
if weight_type is not None and weight_type != "param":
__UpperCAmelCase : Tuple = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__UpperCAmelCase : List[Any] = '''.'''.join([key, hf_param_name] )
else:
__UpperCAmelCase : Tuple = key
__UpperCAmelCase : str = value if '''lm_head''' in full_key else value[0]
lowerCAmelCase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = False
for key, mapped_key in MAPPING.items():
__UpperCAmelCase : str = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__UpperCAmelCase : List[str] = True
if "*" in mapped_key:
__UpperCAmelCase : Dict = name.split(lowercase_ )[0].split('''.''' )[-2]
__UpperCAmelCase : Optional[Any] = mapped_key.replace('''*''' , lowercase_ )
if "weight_g" in name:
__UpperCAmelCase : Tuple = '''weight_g'''
elif "weight_v" in name:
__UpperCAmelCase : List[str] = '''weight_v'''
elif "bias" in name:
__UpperCAmelCase : Union[str, Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__UpperCAmelCase : int = '''weight'''
else:
__UpperCAmelCase : Optional[Any] = None
if hf_dict is not None:
rename_dict(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
set_recursively(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            return is_used
    return is_used
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Any = fairseq_model.state_dict()
__UpperCAmelCase : Dict = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__UpperCAmelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , hf_model.config.feat_extract_norm == '''group''' , )
__UpperCAmelCase : Union[str, Any] = True
else:
__UpperCAmelCase : Optional[Any] = load_wavaveca_layer(lowercase_ , lowercase_ , lowercase_ )
if not is_used:
unused_weights.append(lowercase_ )
logger.warning(f"Unused weights: {unused_weights}" )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = full_name.split('''conv_layers.''' )[-1]
__UpperCAmelCase : Optional[int] = name.split('''.''' )
__UpperCAmelCase : Any = int(items[0] )
__UpperCAmelCase : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__UpperCAmelCase : List[str] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__UpperCAmelCase : Dict = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__UpperCAmelCase : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__UpperCAmelCase : Optional[Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowercase_ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_=False ) -> Dict:
'''simple docstring'''
if config_path is not None:
__UpperCAmelCase : Union[str, Any] = WavaVecaConfig.from_pretrained(lowercase_ )
else:
__UpperCAmelCase : Optional[int] = WavaVecaConfig()
if is_seq_class:
__UpperCAmelCase : str = read_txt_into_dict(lowercase_ )
__UpperCAmelCase : Union[str, Any] = idalabel
__UpperCAmelCase : Dict = WavaVecaForSequenceClassification(lowercase_ )
__UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
feature_extractor.save_pretrained(lowercase_ )
elif is_finetuned:
if dict_path:
__UpperCAmelCase : List[Any] = Dictionary.load(lowercase_ )
            # important: change bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
__UpperCAmelCase : str = target_dict.pad_index
__UpperCAmelCase : Optional[Any] = target_dict.bos_index
__UpperCAmelCase : Optional[Any] = target_dict.eos_index
__UpperCAmelCase : Dict = len(target_dict.symbols )
__UpperCAmelCase : Optional[Any] = os.path.join(lowercase_ , '''vocab.json''' )
if not os.path.isdir(lowercase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase_ ) )
return
os.makedirs(lowercase_ , exist_ok=lowercase_ )
__UpperCAmelCase : Union[str, Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__UpperCAmelCase : int = 0
__UpperCAmelCase : Dict = 1
with open(lowercase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(lowercase_ , lowercase_ )
__UpperCAmelCase : Union[str, Any] = WavaVecaCTCTokenizer(
lowercase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase_ , )
__UpperCAmelCase : List[str] = True if config.feat_extract_norm == '''layer''' else False
__UpperCAmelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowercase_ , return_attention_mask=lowercase_ , )
__UpperCAmelCase : Any = WavaVecaProcessor(feature_extractor=lowercase_ , tokenizer=lowercase_ )
processor.save_pretrained(lowercase_ )
__UpperCAmelCase : Optional[Any] = WavaVecaForCTC(lowercase_ )
else:
__UpperCAmelCase : Optional[int] = WavaVecaForPreTraining(lowercase_ )
if is_finetuned or is_seq_class:
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__UpperCAmelCase : Dict = argparse.Namespace(task='''audio_pretraining''' )
__UpperCAmelCase : Optional[Any] = fairseq.tasks.setup_task(lowercase_ )
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase_ )
__UpperCAmelCase : Dict = model[0].eval()
recursively_load_weights(lowercase_ , lowercase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 675 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@require_torch
def A( self):
__UpperCAmelCase : str = pipeline(
task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''')
__UpperCAmelCase : Optional[int] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Dict = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [{'''score''': 0.5_0_1, '''label''': '''Sound of a dog'''}, {'''score''': 0.4_9_9, '''label''': '''Sound of vaccum cleaner'''}] , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
@slow
@require_torch
def A( self):
__UpperCAmelCase : int = pipeline(
task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , )
# This is an audio of a dog
__UpperCAmelCase : Optional[Any] = load_dataset('''ashraq/esc50''')
__UpperCAmelCase : Union[str, Any] = dataset['''train''']['''audio'''][-1]['''array''']
__UpperCAmelCase : Union[str, Any] = audio_classifier(lowercase__ , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
] , )
__UpperCAmelCase : Optional[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''])
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
__UpperCAmelCase : Optional[Any] = audio_classifier(
[audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5)
self.assertEqual(
nested_simplify(lowercase__) , [
[
{'''score''': 0.9_9_9, '''label''': '''Sound of a dog'''},
{'''score''': 0.0_0_1, '''label''': '''Sound of vaccum cleaner'''},
],
]
* 5 , )
@unittest.skip('''No models are available in TF''')
def A( self):
pass
| 675 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase = False
lowerCAmelCase = True
lowerCAmelCase = False
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCAmelCase = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowerCAmelCase = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowerCAmelCase = reader.read()
lowerCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowerCAmelCase = UNetaDModel(**config)
else:
lowerCAmelCase = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase = config[key]
del config[key]
lowerCAmelCase = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowerCAmelCase = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
lowerCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowerCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowerCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowerCAmelCase = param_value
lowerCAmelCase = True
if not has_changed:
lowerCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 675 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__)
        # create an imagenet label -> id dictionary for easier use
__UpperCAmelCase : List[str] = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
__UpperCAmelCase : Dict = int(lowercase__)
__UpperCAmelCase : Tuple = dict(sorted(self.labels.items()))
def A( self , lowercase__):
if not isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = list(lowercase__)
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.")
return [self.labels[l] for l in label]
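    # Hedged usage note (the label/id values are illustrative ImageNet-style entries,
    # not pinned by this file): with the id -> label mapping registered in __init__,
    # the lookup above accepts a single label or a list (e.g. ["white shark",
    # "umbrella"]) and returns the matching integer class ids, raising a ValueError
    # that lists all known labels when one is unrecognized.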
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 4.0 , lowercase__ = None , lowercase__ = 5_0 , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : List[str] = len(lowercase__)
__UpperCAmelCase : str = self.transformer.config.sample_size
__UpperCAmelCase : List[str] = self.transformer.config.in_channels
__UpperCAmelCase : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2) if guidance_scale > 1 else latents
__UpperCAmelCase : Union[str, Any] = torch.tensor(lowercase__ , device=self.device).reshape(-1)
__UpperCAmelCase : Dict = torch.tensor([1_0_0_0] * batch_size , device=self.device)
__UpperCAmelCase : int = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
__UpperCAmelCase : List[str] = latent_model_input[: len(lowercase__) // 2]
__UpperCAmelCase : Optional[Any] = torch.cat([half, half] , dim=0)
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__)
__UpperCAmelCase : Any = t
if not torch.is_tensor(lowercase__):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__UpperCAmelCase : List[str] = latent_model_input.device.type == '''mps'''
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.floataa if is_mps else torch.floataa
else:
__UpperCAmelCase : Dict = torch.intaa if is_mps else torch.intaa
__UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
__UpperCAmelCase : List[str] = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__UpperCAmelCase : Optional[int] = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
__UpperCAmelCase : Any = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__).sample
# perform guidance
if guidance_scale > 1:
                __UpperCAmelCase , __UpperCAmelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                __UpperCAmelCase , __UpperCAmelCase = torch.split(lowercase__ , len(lowercase__) // 2 , dim=0)
__UpperCAmelCase : List[str] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__UpperCAmelCase : str = torch.cat([half_eps, half_eps] , dim=0)
__UpperCAmelCase : Any = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
                __UpperCAmelCase , __UpperCAmelCase = torch.split(lowercase__ , lowercase__ , dim=1)
else:
__UpperCAmelCase : Any = noise_pred
# compute previous image: x_t -> x_t-1
__UpperCAmelCase : Dict = self.scheduler.step(lowercase__ , lowercase__ , lowercase__).prev_sample
if guidance_scale > 1:
            __UpperCAmelCase , __UpperCAmelCase = latent_model_input.chunk(2 , dim=0)
else:
__UpperCAmelCase : List[Any] = latent_model_input
__UpperCAmelCase : List[str] = 1 / self.vae.config.scaling_factor * latents
__UpperCAmelCase : Optional[int] = self.vae.decode(lowercase__).sample
__UpperCAmelCase : List[str] = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__UpperCAmelCase : str = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__)
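# Hedged end-to-end sketch (the checkpoint id is an assumption, and the method /
# keyword names follow the upstream DiT pipeline rather than this file's obfuscated
# signatures):
#     pipe = DiffusionPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     ids = pipe.get_label_ids(["white shark", "umbrella"])  # hypothetical labels
#     images = pipe(ids, num_inference_steps=25).images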
| 675 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , lowercase__ = False , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(
features=lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ , streaming=lowercase__ , num_proc=lowercase__ , **lowercase__ , )
__UpperCAmelCase : Tuple = Generator(
cache_dir=lowercase__ , features=lowercase__ , generator=lowercase__ , gen_kwargs=lowercase__ , **lowercase__ , )
def A( self):
# Build iterable dataset
if self.streaming:
__UpperCAmelCase : Tuple = self.builder.as_streaming_dataset(split='''train''')
# Build regular (map-style) dataset
else:
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[Any] = None
self.builder.download_and_prepare(
download_config=lowercase__ , download_mode=lowercase__ , verification_mode=lowercase__ , base_path=lowercase__ , num_proc=self.num_proc , )
__UpperCAmelCase : List[Any] = self.builder.as_dataset(
split='''train''' , verification_mode=lowercase__ , in_memory=self.keep_in_memory)
return dataset
| 675 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 675 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
lowerCAmelCase = """https://www.indeed.co.in/jobs?q=mobile+app+development&l="""
def __SCREAMING_SNAKE_CASE ( lowercase_ = "mumbai" ) -> Generator[tuple[str, str], None, None]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__UpperCAmelCase : str = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__UpperCAmelCase : List[str] = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs("""Bangalore"""), 1):
print(F'Job {i:>2} is {job[0]} at {job[1]}')
| 675 | 1 |
from __future__ import annotations
import bisect
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = -1 ) -> int:
'''simple docstring'''
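    # Leftmost insertion point: the first index whose element is >= item, found by halving [lo, hi).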
if hi < 0:
__UpperCAmelCase : Any = len(lowercase_ )
while lo < hi:
__UpperCAmelCase : int = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCAmelCase : Optional[Any] = mid + 1
else:
__UpperCAmelCase : Optional[int] = mid
return lo
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = -1 ) -> int:
'''simple docstring'''
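    # Rightmost insertion point: the first index whose element is > item, so equal elements stay to the left.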
if hi < 0:
__UpperCAmelCase : Union[str, Any] = len(lowercase_ )
while lo < hi:
__UpperCAmelCase : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCAmelCase : Optional[Any] = mid + 1
else:
__UpperCAmelCase : List[Any] = mid
return lo
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ = 0 , lowercase_ = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
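    # Classic iterative binary search; returns the index of item, or None if it is absent.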
__UpperCAmelCase : Dict = 0
__UpperCAmelCase : Union[str, Any] = len(lowercase_ ) - 1
while left <= right:
__UpperCAmelCase : Dict = left + (right - left) // 2
__UpperCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCAmelCase : Optional[int] = midpoint - 1
else:
__UpperCAmelCase : Any = midpoint + 1
return None
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
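    # Delegate to the standard-library bisect module, then check that the item really sits at that index.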
__UpperCAmelCase : int = bisect.bisect_left(lowercase_ , lowercase_ )
if index != len(lowercase_ ) and sorted_collection[index] == item:
return index
return None
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int | None:
'''simple docstring'''
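    # Recursive binary search over the inclusive index range [left, right].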
if right < left:
return None
__UpperCAmelCase : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowercase_ , lowercase_ , lowercase_ , midpoint - 1 )
else:
return binary_search_by_recursion(lowercase_ , lowercase_ , midpoint + 1 , lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
lowerCAmelCase = sorted(int(item) for item in user_input.split(""","""))
lowerCAmelCase = int(input("""Enter a single number to be found in the list:\n"""))
lowerCAmelCase = binary_search(collection, target)
if result is None:
print(F'{target} was not found in {collection}.')
else:
print(F'{target} was found at position {result} in {collection}.')
| 675 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=8 ) -> List[Any]:
'''simple docstring'''
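    # Map a requested pixel size to the matching latent size: ceil(size / scale_factor**2) * scale_factor.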
__UpperCAmelCase : int = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__UpperCAmelCase : Union[str, Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCamelCase ( _UpperCamelCase ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , ):
super().__init__()
self.register_modules(
unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
__UpperCAmelCase : Any = 2 ** (len(self.movq.config.block_out_channels) - 1)
def A( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__):
if latents is None:
__UpperCAmelCase : Any = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__)
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}")
__UpperCAmelCase : Union[str, Any] = latents.to(lowercase__)
__UpperCAmelCase : Union[str, Any] = latents * scheduler.init_noise_sigma
return latents
def A( self , lowercase__=0):
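        # Sequential offload: keep the unet and movq on CPU and move weights to the GPU only while each runs.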
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''')
__UpperCAmelCase : List[str] = torch.device(F"cuda:{gpu_id}")
__UpperCAmelCase : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase__ , lowercase__)
def A( self , lowercase__=0):
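        # Model offload: hooks move each whole model to the GPU just before use; faster than sequential offload, with smaller memory savings.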
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0'''):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')
__UpperCAmelCase : Optional[Any] = torch.device(F"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=lowercase__)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__UpperCAmelCase : List[Any] = None
for cpu_offloaded_model in [self.unet, self.movq]:
__UpperCAmelCase , __UpperCAmelCase : List[str] = cpu_offload_with_hook(lowercase__ , lowercase__ , prev_module_hook=lowercase__)
# We'll offload the last model manually.
__UpperCAmelCase : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def A( self):
if not hasattr(self.unet , '''_hf_hook'''):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase__ , '''_hf_hook''')
and hasattr(module._hf_hook , '''execution_device''')
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase__)
def __call__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 5_1_2 , lowercase__ = 5_1_2 , lowercase__ = 1_0_0 , lowercase__ = 4.0 , lowercase__ = 1 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , ):
__UpperCAmelCase : str = self._execution_device
__UpperCAmelCase : List[str] = guidance_scale > 1.0
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Dict = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Tuple = torch.cat(lowercase__ , dim=0)
if isinstance(lowercase__ , lowercase__):
__UpperCAmelCase : Any = torch.cat(lowercase__ , dim=0)
__UpperCAmelCase : Union[str, Any] = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__UpperCAmelCase : Optional[int] = image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Dict = negative_image_embeds.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : List[Any] = hint.repeat_interleave(lowercase__ , dim=0)
__UpperCAmelCase : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
__UpperCAmelCase : List[Any] = torch.cat([hint, hint] , dim=0).to(dtype=self.unet.dtype , device=lowercase__)
self.scheduler.set_timesteps(lowercase__ , device=lowercase__)
__UpperCAmelCase : List[Any] = self.scheduler.timesteps
__UpperCAmelCase : Any = self.movq.config.latent_channels
__UpperCAmelCase , __UpperCAmelCase : List[str] = downscale_height_and_width(lowercase__ , lowercase__ , self.movq_scale_factor)
# create initial latent
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase__ , lowercase__ , lowercase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase__)):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__UpperCAmelCase : Union[str, Any] = {'''image_embeds''': image_embeds, '''hint''': hint}
__UpperCAmelCase : Any = self.unet(
sample=lowercase__ , timestep=lowercase__ , encoder_hidden_states=lowercase__ , added_cond_kwargs=lowercase__ , return_dict=lowercase__ , )[0]
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
__UpperCAmelCase , __UpperCAmelCase : List[str] = noise_pred.chunk(2)
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = variance_pred.chunk(2)
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__UpperCAmelCase : int = torch.cat([noise_pred, variance_pred_text] , dim=1)
if not (
hasattr(self.scheduler.config , '''variance_type''')
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1)
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : Tuple = self.scheduler.step(
lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ , )[0]
# post-processing
__UpperCAmelCase : str = self.movq.decode(lowercase__ , force_not_quantize=lowercase__)['''sample''']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
__UpperCAmelCase : Dict = image * 0.5 + 0.5
__UpperCAmelCase : Union[str, Any] = image.clamp(0 , 1)
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__UpperCAmelCase : List[str] = self.numpy_to_pil(lowercase__)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase__)
| 675 | 1 |
import logging
import os
from .state import PartialState
class lowerCamelCase ( logging.LoggerAdapter ):
@staticmethod
def A( lowercase__):
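        # Log on every process when main_process_only is False; otherwise only the main process emits the record.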
__UpperCAmelCase : str = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A( self , lowercase__ , lowercase__ , *lowercase__ , **lowercase__):
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''')
__UpperCAmelCase : List[str] = kwargs.pop('''main_process_only''' , lowercase__)
__UpperCAmelCase : Dict = kwargs.pop('''in_order''' , lowercase__)
if self.isEnabledFor(lowercase__):
if self._should_log(lowercase__):
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = self.process(lowercase__ , lowercase__)
self.logger.log(lowercase__ , lowercase__ , *lowercase__ , **lowercase__)
elif in_order:
__UpperCAmelCase : List[str] = PartialState()
for i in range(state.num_processes):
if i == state.process_index:
__UpperCAmelCase , __UpperCAmelCase : str = self.process(lowercase__ , lowercase__)
self.logger.log(lowercase__ , lowercase__ , *lowercase__ , **lowercase__)
state.wait_for_everyone()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> List[Any]:
'''simple docstring'''
if log_level is None:
__UpperCAmelCase : Optional[Any] = os.environ.get('''ACCELERATE_LOG_LEVEL''' , lowercase_ )
__UpperCAmelCase : Any = logging.getLogger(lowercase_ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowercase_ , {} )
| 675 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
| 675 | 1 |
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
lowerCAmelCase = [
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"""
""" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"""
""" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.""",
"""The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"""
""" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"""
""" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"""
""" body.""",
"""Amnesty International releases its annual report on the death penalty. The report catalogs the use of"""
""" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"""
""" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"""
""" punishment.""",
]
lowerCAmelCase = [
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."""
""" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"""
""" had informed his Lufthansa training school of an episode of severe depression, airline says .""",
"""Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."""
""" Israel and the United States opposed the move, which could open the door to war crimes investigations against"""
""" Israelis .""",
"""Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"""
""" death . Organization claims that governments around the world are using the threat of terrorism to advance"""
""" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"""
""" sentences up by 28% .""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = calculate_rouge(lowercase_ , lowercase_ , bootstrap_aggregation=lowercase_ , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(lowercase_ , lowercase_ )
__UpperCAmelCase : Dict = calculate_rouge(lowercase_ , lowercase_ , bootstrap_aggregation=lowercase_ , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = '''rougeLsum'''
__UpperCAmelCase : Optional[int] = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=[k] )[k]
__UpperCAmelCase : str = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : str = ['''rouge1''', '''rouge2''', '''rougeL''']
__UpperCAmelCase : Optional[Any] = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=lowercase_ )
__UpperCAmelCase : Any = calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ , rouge_keys=lowercase_ )
assert score_sep == score_no_sep
def __SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : int = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
__UpperCAmelCase : List[str] = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ ) == calculate_rouge(lowercase_ , lowercase_ , newline_sep=lowercase_ )
def __SCREAMING_SNAKE_CASE ( ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[str] = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
__UpperCAmelCase : List[Any] = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
__UpperCAmelCase : Union[str, Any] = calculate_rouge(lowercase_ , lowercase_ , rouge_keys=['''rougeLsum'''] , newline_sep=lowercase_ )['''rougeLsum''']
__UpperCAmelCase : Dict = calculate_rouge(lowercase_ , lowercase_ , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : List[str] = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
__UpperCAmelCase : Dict = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(lowercase_ , lowercase_ )
__UpperCAmelCase : Tuple = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=lowercase_ )
assert isinstance(lowercase_ , lowercase_ )
| 675 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : List[str] = '''sew-d'''
def __init__( self , lowercase__=3_2 , lowercase__=7_6_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=3_0_7_2 , lowercase__=2 , lowercase__=5_1_2 , lowercase__=2_5_6 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1e-7 , lowercase__=1e-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=1_2_8 , lowercase__=1_6 , lowercase__=True , lowercase__=0.0_5 , lowercase__=1_0 , lowercase__=2 , lowercase__=0.0 , lowercase__=1_0 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=2_5_6 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ):
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__)
__UpperCAmelCase : Dict = hidden_size
__UpperCAmelCase : int = feat_extract_norm
__UpperCAmelCase : List[str] = feat_extract_activation
__UpperCAmelCase : str = list(lowercase__)
__UpperCAmelCase : Optional[int] = list(lowercase__)
__UpperCAmelCase : Tuple = list(lowercase__)
__UpperCAmelCase : Tuple = conv_bias
__UpperCAmelCase : int = num_conv_pos_embeddings
__UpperCAmelCase : int = num_conv_pos_embedding_groups
__UpperCAmelCase : Any = len(self.conv_dim)
__UpperCAmelCase : str = num_hidden_layers
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Union[str, Any] = squeeze_factor
__UpperCAmelCase : Union[str, Any] = max_position_embeddings
__UpperCAmelCase : List[str] = position_buckets
__UpperCAmelCase : Tuple = share_att_key
__UpperCAmelCase : int = relative_attention
__UpperCAmelCase : str = norm_rel_ebd
__UpperCAmelCase : Dict = list(lowercase__)
__UpperCAmelCase : int = hidden_act
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : Optional[int] = hidden_dropout
__UpperCAmelCase : int = attention_dropout
__UpperCAmelCase : Optional[int] = activation_dropout
__UpperCAmelCase : Optional[Any] = feat_proj_dropout
__UpperCAmelCase : Optional[Any] = final_dropout
__UpperCAmelCase : Optional[int] = layer_norm_eps
__UpperCAmelCase : str = feature_layer_norm_eps
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : Tuple = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
                '''Configuration for convolutional layers is incorrect. '''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, '''
                F"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                F"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCAmelCase : Optional[int] = apply_spec_augment
__UpperCAmelCase : List[str] = mask_time_prob
__UpperCAmelCase : Union[str, Any] = mask_time_length
__UpperCAmelCase : Optional[int] = mask_time_min_masks
__UpperCAmelCase : Optional[int] = mask_feature_prob
__UpperCAmelCase : List[str] = mask_feature_length
__UpperCAmelCase : List[Any] = mask_feature_min_masks
# ctc loss
__UpperCAmelCase : int = ctc_loss_reduction
__UpperCAmelCase : Union[str, Any] = ctc_zero_infinity
# sequence classification
__UpperCAmelCase : List[str] = use_weighted_layer_sum
__UpperCAmelCase : Tuple = classifier_proj_size
@property
def A( self):
return functools.reduce(operator.mul , self.conv_stride , 1)
| 675 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCamelCase ( _UpperCamelCase ):
_lowerCAmelCase : Tuple = '''realm'''
def __init__( self , lowercase__=3_0_5_2_2 , lowercase__=7_6_8 , lowercase__=1_2_8 , lowercase__=1_2 , lowercase__=1_2 , lowercase__=8 , lowercase__=3_0_7_2 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=5_1_2 , lowercase__=2 , lowercase__=0.0_2 , lowercase__=1e-12 , lowercase__=2_5_6 , lowercase__=1_0 , lowercase__=1e-3 , lowercase__=5 , lowercase__=3_2_0 , lowercase__=1_3_3_5_3_7_1_8 , lowercase__=5_0_0_0 , lowercase__=1 , lowercase__=0 , lowercase__=2 , **lowercase__ , ):
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__)
# Common config
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Optional[Any] = retriever_proj_size
__UpperCAmelCase : List[Any] = num_hidden_layers
__UpperCAmelCase : Optional[Any] = num_attention_heads
__UpperCAmelCase : int = num_candidates
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = layer_norm_eps
# Reader config
__UpperCAmelCase : Optional[int] = span_hidden_size
__UpperCAmelCase : Dict = max_span_width
__UpperCAmelCase : int = reader_layer_norm_eps
__UpperCAmelCase : int = reader_beam_size
__UpperCAmelCase : Optional[int] = reader_seq_len
# Retrieval config
__UpperCAmelCase : Optional[int] = num_block_records
__UpperCAmelCase : Optional[Any] = searcher_beam_size
| 675 |
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = MobileNetVaConfig(layer_norm_eps=0.0_0_1 )
if "_quant" in model_name:
raise ValueError('''Quantized models are not supported.''' )
__UpperCAmelCase : List[Any] = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , lowercase_ )
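    # The model name encodes the depth multiplier and input resolution, e.g. "mobilenet_v1_1.0_224".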
if matches:
__UpperCAmelCase : Any = float(matches[1] )
__UpperCAmelCase : Optional[Any] = int(matches[2] )
# The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
# the usual 1000. The first class (index 0) is "background".
__UpperCAmelCase : Dict = 1001
__UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
__UpperCAmelCase : List[str] = '''huggingface/label-files'''
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type='''dataset''' ) , '''r''' ) )
    __UpperCAmelCase : int = {int(k ) + 1: v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = '''background'''
__UpperCAmelCase : str = idalabel
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
return config
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__UpperCAmelCase : Tuple = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = get_mobilenet_va_config(lowercase_ )
# Load 🤗 model
__UpperCAmelCase : int = MobileNetVaForImageClassification(lowercase_ ).eval()
# Load weights from TensorFlow checkpoint
load_tf_weights_in_mobilenet_va(lowercase_ , lowercase_ , lowercase_ )
# Check outputs on an image, prepared by MobileNetV1ImageProcessor
__UpperCAmelCase : List[str] = MobileNetVaImageProcessor(
crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , )
__UpperCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
__UpperCAmelCase : Union[str, Any] = model(**lowercase_ )
__UpperCAmelCase : Optional[Any] = outputs.logits
assert logits.shape == (1, 1001)
if model_name == "mobilenet_v1_1.0_224":
__UpperCAmelCase : Any = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] )
elif model_name == "mobilenet_v1_0.75_192":
__UpperCAmelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] )
else:
__UpperCAmelCase : str = None
if expected_logits is not None:
assert torch.allclose(logits[0, :3] , lowercase_ , atol=1e-4 )
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowercase_ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
__UpperCAmelCase : List[str] = '''google/''' + model_name
image_processor.push_to_hub(lowercase_ )
model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCAmelCase = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 | 1 |
import math
class lowerCamelCase :
def __init__( self , lowercase__=0): # a graph with Node 0,1,...,N-1
__UpperCAmelCase : Any = n
__UpperCAmelCase : int = [
[math.inf for j in range(0 , lowercase__)] for i in range(0 , lowercase__)
] # adjacency matrix for weight
__UpperCAmelCase : Tuple = [
[math.inf for j in range(0 , lowercase__)] for i in range(0 , lowercase__)
] # dp[i][j] stores minimum distance from i to j
def A( self , lowercase__ , lowercase__ , lowercase__):
__UpperCAmelCase : Optional[Any] = w
def A( self):
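        # Floyd-Warshall: relax every (i, j) pair through each intermediate node k, in O(n^3) time.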
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
__UpperCAmelCase : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def A( self , lowercase__ , lowercase__):
return self.dp[u][v]
if __name__ == "__main__":
lowerCAmelCase = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 675 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class lowerCamelCase :
_lowerCAmelCase : Optional[Union[str, Path]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = False
_lowerCAmelCase : bool = True
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : int = 1
_lowerCAmelCase : Optional[Union[str, bool]] = None
_lowerCAmelCase : bool = False
_lowerCAmelCase : Optional[Dict] = None
_lowerCAmelCase : Optional[str] = None
def A( self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
| 675 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ = 4000000 ) -> int:
'''simple docstring'''
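    # Walk the Fibonacci sequence up to n, collecting the even-valued terms for the final sum.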
__UpperCAmelCase : int = []
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(lowercase_ )
__UpperCAmelCase , __UpperCAmelCase : Dict = b, a + b
return sum(lowercase_ )
if __name__ == "__main__":
print(F'{solution() = }')
| 675 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('''the value of both inputs must be positive''' )
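    # Build binary strings of equal width and OR them digit by digit.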
__UpperCAmelCase : Dict = str(bin(lowercase_ ) )[2:] # remove the leading "0b"
__UpperCAmelCase : List[Any] = str(bin(lowercase_ ) )[2:]
__UpperCAmelCase : List[Any] = max(len(lowercase_ ) , len(lowercase_ ) )
return "0b" + "".join(
str(int('''1''' in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(lowercase_ ) , b_binary.zfill(lowercase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 675 | 1 |
# Algorithm for the pigeonhole sorting
def __SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = min(lowercase_ ) # min() finds the minimum value
__UpperCAmelCase : str = max(lowercase_ ) # max() finds the maximum value
__UpperCAmelCase : Optional[int] = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
__UpperCAmelCase : str = [0] * size
# Populate the pigeonholes.
for x in a:
        assert isinstance(x , int ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
__UpperCAmelCase : int = 0
for count in range(lowercase_ ):
while holes[count] > 0:
holes[count] -= 1
__UpperCAmelCase : str = count + min_val
i += 1
def __SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Tuple = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(lowercase_ )
    print('''Sorted order is:''' , ''' '''.join(str(x) for x in lowercase_ ) )
if __name__ == "__main__":
main()
| 675 |
from string import ascii_uppercase
lowerCAmelCase = {char: i for i, char in enumerate(ascii_uppercase)}
lowerCAmelCase = dict(enumerate(ascii_uppercase))
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
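    # Extend the key by cycling through its own characters until it is as long as the message.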
__UpperCAmelCase : List[Any] = len(lowercase_ )
__UpperCAmelCase : int = 0
while True:
if x == i:
__UpperCAmelCase : List[str] = 0
if len(lowercase_ ) == len(lowercase_ ):
break
key += key[i]
i += 1
return key
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = ''''''
__UpperCAmelCase : List[str] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
__UpperCAmelCase : Optional[int] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ''''''
__UpperCAmelCase : List[str] = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
__UpperCAmelCase : int = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = '''THE GERMAN ATTACK'''
__UpperCAmelCase : List[Any] = '''SECRET'''
__UpperCAmelCase : Optional[int] = generate_key(lowercase_ , lowercase_ )
__UpperCAmelCase : List[str] = cipher_text(lowercase_ , lowercase_ )
print(f"Encrypted Text = {s}" )
print(f"Original Text = {original_text(lowercase_ , lowercase_ )}" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 675 | 1 |