from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    """
    Count the tile totals t <= t_limit for which the number of hollow square
    laminae that can be formed from exactly t tiles is between 1 and n_limit.
    """
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
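# Sanity check (not from the original file): for the defaults above, the
# accepted Project Euler 174 answer is 209566 -- worth re-verifying before
# relying on it.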
from __future__ import annotations


def fractional_knapsack(
    value: list[int], weight: list[float], capacity: float
) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: take items by descending value/weight ratio."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
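# Usage sketch (example inputs chosen here, not part of the original file):
# >>> fractional_knapsack([1, 3, 5, 7, 9], [0.9, 0.7, 0.5, 0.3, 0.1], 5)
# (25, [1, 1, 1, 1, 1])   # total weight is 2.5 <= 5, so every item fits whole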
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline: predicts the most likely label for
    an image given a set of `candidate_labels`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**model_inputs, **text_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
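# Usage sketch (the task name is the standard `pipeline` alias; the checkpoint
# below is illustrative):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])
#   # -> [{"score": ..., "label": ...}, ...] sorted by descending score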
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
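# The result maps headline text to the displayed counter, roughly (keys and
# values depend on the page layout at scrape time):
#   {"Coronavirus Cases:": "...", "Deaths:": "...", "Recovered:": "...", ...}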
import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")

    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
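# Expected output for the default argument: solution() = 104743, the 10001st
# prime (Project Euler problem 7).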
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot named entity recognition model (FSNER), built on a BERT encoder."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """Find scores of each token being the start and end token for an entity."""
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
"""Project Euler problem 188: last digits of the hyperexponentiation of 1777 by 1855."""


def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Return base ** exponent modulo modulo_value, by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of base tetrated `height` times."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
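# Sanity check (not from the original file): the accepted Project Euler 188
# answer for these defaults is 95962097 -- re-verify before relying on it.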
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (A, B, ..., Z, AA, AB, ...) to its number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
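# Examples: excel_title_to_column("A") == 1, excel_title_to_column("AB") == 28,
# excel_title_to_column("ZZ") == 26 * 26 + 26 == 702.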
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
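# Example: sigmoid(np.array([-1.0, 0.0, 1.0])) ~= [0.26894142, 0.5, 0.73105858]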
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
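# Example (illustrative key, not from the original file): the rule pattern
# ("mlp", "c_fc", "kernel") matches the flattened parameter path
# ("transformer", "h", "0", "mlp", "c_fc", "kernel"), because the three regexes
# match a contiguous window of that tuple, so _match(...) returns True.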
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) step by step, based on head importance scores."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove their weights) based on the head mask, and compare timings."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
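    # Example invocation (hypothetical script and data file names; the flags are the
    # ones defined by the parser above):
    # python run_prune_gpt.py --data_dir ./token_ids.txt --model_name_or_path gpt2 \
    #     --output_dir ./pruned --try_masking --masking_threshold 0.9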
| 652 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[Any] = KandinskyInpaintPipeline
_UpperCamelCase : List[str] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
_UpperCamelCase : List[str] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_UpperCamelCase : Tuple = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Tuple = False
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return 1_00
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
_snake_case : List[Any] = MultilingualCLIP(_lowercase )
_snake_case : List[str] = text_encoder.eval()
return text_encoder
@property
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        _snake_case : Any = {
            'in_channels': 9,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'text_image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'text_image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
_snake_case : List[str] = UNetaDConditionModel(**_lowercase )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : int = self.dummy_text_encoder
_snake_case : Tuple = self.dummy_tokenizer
_snake_case : Union[str, Any] = self.dummy_unet
_snake_case : Dict = self.dummy_movq
_snake_case : Any = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=_lowercase , )
        _snake_case : List[Any] = {
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
return components
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]=0 ):
'''simple docstring'''
_snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase )
_snake_case : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase )
# create init_image
_snake_case : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
_snake_case : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_snake_case : Any = Image.fromarray(np.uinta(_lowercase ) ).convert('RGB' ).resize((2_56, 2_56) )
# create mask
_snake_case : Optional[Any] = np.ones((64, 64) , dtype=np.floataa )
_snake_case : Tuple = 0
if str(_lowercase ).startswith('mps' ):
_snake_case : List[str] = torch.manual_seed(_lowercase )
else:
_snake_case : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        _snake_case : Tuple = {
            'prompt': 'horse',
            'image': init_image,
            'mask_image': mask,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 2,
            'guidance_scale': 4.0,
            'output_type': 'np',
        }
return inputs
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : int = """cpu"""
_snake_case : Optional[int] = self.get_dummy_components()
_snake_case : Dict = self.pipeline_class(**_lowercase )
_snake_case : Optional[int] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
_snake_case : List[str] = pipe(**self.get_dummy_inputs(_lowercase ) )
_snake_case : Any = output.images
_snake_case : Dict = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
_snake_case : Any = image[0, -3:, -3:, -1]
_snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_snake_case : Union[str, Any] = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
_snake_case : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_snake_case : str = np.ones((7_68, 7_68) , dtype=np.floataa )
_snake_case : Union[str, Any] = 0
_snake_case : Any = """a hat"""
_snake_case : str = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
_snake_case : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
_snake_case : List[str] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
_snake_case : Any = torch.Generator(device='cpu' ).manual_seed(0 )
_snake_case : Optional[Any] = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_snake_case : str = pipeline(
_lowercase , image=_lowercase , mask_image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , )
_snake_case : Any = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 721 |
def A__( __lowerCAmelCase ):
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_string = str(abs(__lowerCAmelCase ) )
        # one copy of the digit list per position, then drop that position's digit
        num_transpositions = [list(num_string ) for _ in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
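    # A small usage sketch: deleting one digit of 123 yields 23, 13 or 12, so the
    # maximum reachable value is 23.
    print(A__(123 ))  # expected: 23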
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase_ : Tuple = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Optional[int] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Union[str, Any] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : List[str] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
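    # Usage sketch: with the module object swapped for the _LazyModule above, a plain
    # `from transformers.models.mobilebert import MobileBertConfig` defers the heavy
    # submodule imports until the attribute is first accessed.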
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( TokenClassificationTask ):  # NER task: label in the last column
    """simple docstring"""

    def __init__( self , label_idx=-1 ):
        '''simple docstring'''
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file( self , data_dir , mode: Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        '''simple docstring'''
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def get_labels( self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class lowercase ( lowercase ):  # the chunking task reuses the NER reader above, with a different label column
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        # in CoNLL-2003 chunking files, the chunk label is the second-to-last column
        super().__init__(label_idx=-2 )

    def get_labels( self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class lowercase ( TokenClassificationTask ):  # POS tagging over CoNLL-U files
    """simple docstring"""

    def read_examples_from_file( self , data_dir , mode: Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples

    def write_predictions_to_file( self , writer: TextIO , test_input_reader: TextIO , preds_list: List ):
        '''simple docstring'''
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1

    def get_labels( self , path: str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
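# A sketch of the whitespace-separated, CoNLL-style input the NER reader above
# expects (one token per line, label in the last column, sentences separated by
# blank lines; the tags here are illustrative):
#
# EU B-ORG
# rejects O
# German B-MISC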
| 652 | 0 |
def depth_first_search( grid , row , col , visit ):
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col) )

    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )

    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
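    # A minimal usage sketch: on a fully open 2x2 grid there are exactly two simple
    # paths from the top-left to the bottom-right corner.
    print(depth_first_search([[0, 0], [0, 0]] , 0 , 0 , set() ))  # expected: 2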
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
class lowercase :
    """simple docstring"""

    def __init__( self , n ):
        '''simple docstring'''
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__( self ):
        '''simple docstring'''
        return self.size

    def is_empty( self ):
        '''simple docstring'''
        return self.size == 0

    def first( self ):
        '''simple docstring'''
        return False if self.is_empty() else self.array[self.front]

    def enqueue( self , data ):
        '''simple docstring'''
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue( self ):
        '''simple docstring'''
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
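

if __name__ == "__main__":
    # A minimal usage sketch of the fixed-capacity FIFO queue above.
    queue = lowercase(3 )
    queue.enqueue('a' ).enqueue('b' )
    print(len(queue ))  # expected: 2
    print(queue.dequeue() )  # expected: a
    print(queue.first() )  # expected: b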
| 702 |
import functools
def A__( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )

    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )

    if len(days ) == 0:
        return 0

    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )

    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )

    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 3_65:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1 )

        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
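    # A usage sketch on the classic example: 1-day passes on days 1 and 20 plus a
    # 7-day pass covering days 4-8 cost 2 + 7 + 2 = 11.
    print(A__([1, 4, 6, 7, 8, 20] , [2, 7, 15] ))  # expected: 11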
| 652 | 0 |
import math
def solution( n = 1_00 ):
    sum_of_squares = sum(i * i for i in range(1 , n + 1 ) )
    square_of_sum = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
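    # A second sanity check (quick sketch): for n = 10 the difference is
    # 3025 - 385 = 2640.
    print(F'''{solution(10) = }''')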
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( SegformerImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase ( nn.Module ):
"""simple docstring"""
    def __init__( self , num_attention_heads : int = 16 , attention_head_dim : int = 88 , in_channels : Optional[int] = None , num_layers : int = 1 , dropout : float = 0.0 , norm_num_groups : int = 32 , cross_attention_dim : Optional[int] = None , attention_bias : bool = False , sample_size : Optional[int] = None , num_vector_embeds : Optional[int] = None , activation_fn : str = "geglu" , num_embeds_ada_norm : Optional[int] = None , ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 2_57]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict : bool = True , ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return TransformeraDModelOutput(sample=output_states )
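

# A numeric sketch of the blend in `forward` above: each transformer contributes its
# residual (encoded_state - input), the residuals are weighted by `mix_ratio`, and the
# input is added back, i.e. with mix_ratio = 0.5:
#     output = 0.5 * (enc0 - x) + 0.5 * (enc1 - x) + x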
| 704 |
from math import factorial
def binomial_distribution( successes , trials , prob ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 0 - 1' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel( ksize , sigma , theta , lambd , psi , gamma ):
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float64 )

    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 1_80 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg' )
    # turn image in gray scale value
    gray = cvtColor(img , COLOR_BGR2GRAY )

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2] )
    for theta in [0, 30, 60, 90, 1_20, 1_50]:
        kernel_10 = gabor_filter_kernel(10 , 8 , theta , 10 , 0 , 0 )
        out += filter2D(gray , CV_8UC3 , kernel_10 )
    out = out / out.max() * 2_55
    out = out.astype(np.uint8 )

    imshow('Original' , gray )
    imshow('Gabor filter with 20x20 mask and 6 directions' , out )

    waitKey(0 )
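    # Sanity sketch: an even ksize is bumped to the next odd value inside the
    # function, so requesting a 10x10 kernel actually yields an 11x11 one.
    assert gabor_filter_kernel(10 , 8 , 0 , 10 , 0 , 0 ).shape == (11, 11)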
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )

    return image


def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''vision_model.encoder.layers.{i}.self_attn.qkv.bias'''] = qkv_bias
def get_blipa_config( model_name , eos_token_id=None ):
    image_size = 3_64 if 'coco' in model_name else 2_24
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()

    config = BlipaConfig(vision_config=vision_config , text_config=text_config )

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )

    hf_model = BlipaForConditionalGeneration(config ).eval()

    model_name_to_original = {
        'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
        'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
        'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
        'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
        'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
        'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
        'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
    }

    name , type = model_name_to_original[model_name]

    # load original model
    print('Loading original model...' )
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print('Done!' )

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('Qformer.bert' ):
            key = key.replace('Qformer.bert' , 'qformer' )
        if "attention.self" in key:
            key = key.replace('self' , 'attention' )
        if "opt_proj" in key:
            key = key.replace('opt_proj' , 'language_projection' )
        if "t5_proj" in key:
            key = key.replace('t5_proj' , 'language_projection' )
        if key.startswith('opt' ):
            key = key.replace('opt' , 'language' )
        if key.startswith('t5' ):
            key = key.replace('t5' , 'language' )
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict , config )

    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(device )

    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values.to(device )

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values , pixel_values )

    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits

    assert original_logits.shape == logits.shape
    print('First values of original logits:' , original_logits[0, :3, :3] )
    print('First values of HF logits:' , logits[0, :3, :3] )

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print('Looks ok!' )

    print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt , return_tensors='pt' ).input_ids.to(device )

    original_outputs = original_model.generate({'image': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('Original generation:' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('HF generation:' , output_text )

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
lowercase_ : List[Any] = argparse.ArgumentParser()
lowercase_ : str = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowercase_ : Optional[Any] = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
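    # Example invocation (hypothetical dump path; the model names come from the
    # choices list above):
    # python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #     --pytorch_dump_folder_path ./blip2-opt-2.7b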
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
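    # Example invocation (a sketch; the script name and checkpoint choices are
    # assumptions shown for orientation only):
    #   python consolidate_rag_checkpoint.py --model_type rag_sequence \
    #       --generator_name_or_path facebook/bart-large-cnn \
    #       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    #       --dest ./rag-ckpt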
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 0 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)
    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , 'w' ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: list):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 652 | 0 |
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 710 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'''{solution() = }''')
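    # Quick sanity sketch (hand-checked): the primes run 2, 3, 5, 7, 11, 13, ...
    # so the first prime is 2 and the sixth is 13.
    assert solution(1) == 2
    assert solution(6) == 13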
| 652 | 0 |
import functools
def mincost_tickets(days: list, costs: list) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
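    # Worked example (hand-checked): for travel days [1, 4, 6, 7, 8, 20] and pass
    # costs [2, 7, 15] (1-, 7- and 30-day), the optimum is 2 + 7 + 2 = 11.
    print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11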
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, data: list):
    content = "\n".join(data)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 652 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()
        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )
        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)
            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )
            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))
            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )
            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 712 |
from __future__ import annotations
def fractional_knapsack(value: list, weight: list, capacity: int) -> tuple:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
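    # Worked example (hand-checked): value/weight ratios are 6, 5 and 4, so items 0
    # and 1 are taken whole and 20/30 of item 2 fills the rest: 60 + 100 + 80 = 240.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))  # (240.0, [1, 1, 0.666...])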
| 652 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
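# Hand-checked example for the shift_tokens_right helper above: with pad_token_id=0
# and decoder_start_token_id=0, shift_tokens_right(jnp.array([[5, -100, 6]]), 0, 0)
# yields [[0, 5, 0]] -- tokens move one slot right, the decoder start token fills
# position 0, and the shifted -100 label is replaced by the pad id.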
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 0 |
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
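# Minimal usage sketch (a hand-picked override, not taken from this file):
#   config = NezhaConfig(num_hidden_layers=6)
#   assert config.hidden_size == 768 and config.num_hidden_layers == 6
# Unset fields keep their defaults; extra kwargs flow through PretrainedConfig.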
| 714 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
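    # Sanity sketch (hand-checked): the primes begin 2, 3, 5, so solution(3) == 5.
    assert solution(3) == 5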
| 652 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint Set Node to store the parent and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint Set data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for the union operation: attach the lower-rank tree
        # under the root of the higher-rank tree
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge two disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbouring nodes (with weights)
        self.connections: dict = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it is not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # Kruskal's algorithm: sort edges by weight, then greedily keep any edge
        # whose endpoints still lie in different disjoint sets
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
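

if __name__ == "__main__":
    # Minimal usage sketch (hand-checked): a weighted 4-cycle. Kruskal keeps the
    # three cheapest edges and drops the heaviest one, (1, 4) with weight 3.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(3, 4, 1)
    g.add_edge(1, 4, 3)
    mst = g.kruskal()
    print(mst.connections)  # edge (1, 4) does not appear in the tree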
| 715 |
import torch
from transformers import AutoModel
class FSNerModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNerModel, self).__init__()
        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Find scores of each token being start and end token for an entity.
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        # compute embeddings for the query and support windows
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 652 | 0 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple, None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F'''Job {i:>2} is {job[0]} at {job[1]}''')
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = False
_snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
_snake_case : Union[str, Any] = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
_snake_case : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
_snake_case : Any = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
_snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )

        x = self.dropout_pre(x )

        # invert the attention mask into the additive form the T5 blocks expect
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )

        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )

        return self.dropout_post(x ), encoder_inputs_mask
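

# Hedged usage sketch (added; not part of the original module). The
# hyper-parameter values below are illustrative assumptions, not taken from
# any released checkpoint.
if __name__ == "__main__":
    _encoder = lowercase(
        max_length=2_048, vocab_size=1_536, d_model=768, dropout_rate=0.1,
        num_layers=2, num_heads=12, d_kv=64, d_ff=2_048,
        feed_forward_proj='gated-gelu')
    _tokens = torch.randint(0, 1_536, (1, 16))
    _mask = torch.ones(1, 16, dtype=torch.long)
    _encoded, _ = _encoder(_tokens, _mask)
    print(_encoded.shape)  # expected: torch.Size([1, 16, 768])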
| 652 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( ProcessorMixin ):
    """simple docstring"""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
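

# Hedged usage sketch (added, comments only): 'BridgeTower/bridgetower-base'
# is the usual checkpoint name, but treat it as an assumption here.
#     processor = lowercase.from_pretrained('BridgeTower/bridgetower-base')
#     batch = processor(images=image, text='a photo of a cat', return_tensors='pt')
#     # batch contains input_ids/attention_mask plus pixel_values (+ pixel_mask)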
| 717 |
def A__( column_title ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
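    # Hedged sanity checks (added): "AB" = 1 * 26 + 2 and "ZZ" = 26 * 26 + 26.
    assert A__('AB' ) == 28
    assert A__('ZZ' ) == 702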
| 652 | 0 |
def prefix_function( input_string ):
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def A__( input_string ):
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
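    # Hedged sanity check (added): expected values for "aabcdaabc" were worked
    # out by hand from the definition of the prefix function.
    assert prefix_function('aabcdaabc' ) == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert A__('aabcdaabc' ) == 4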
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute( self , predictions , references , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='utf-8' , check=True , )
        assert hasattr(self , 'env' )
    def create_estimator( self , instance_count ):
        '''simple docstring'''
        job_name = f'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}'''
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='py36' , )
    def save_results_as_csv( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
    def test_script( self , instance_count ):
        '''simple docstring'''
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' , 99_99_99 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
        assert all(t <= self.results['eval_loss'] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''' , 'w' ) as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , outfile )
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match( qs , ks ):
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '$' ) for x in qs) )
    # loop over the index where the first of qs matches the first of ks
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False


def _replacement_rules( rules ):
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def A__( __lowerCAmelCase ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
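

# Hedged usage sketch (added): a toy parameter tree with illustrative key
# names; real trees come from a Flax model's params.
if __name__ == "__main__":
    _toy_params = {"transformer": {"wte": {"embedding": 0}}, "ln_f": {"scale": 0, "bias": 0}}
    print(A__(_toy_params ))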
| 652 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : Optional[Any] = logging.get_logger(__name__)
lowercase_ : Any = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase ( PretrainedConfig ):
    """simple docstring"""

    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__( self , vocab_size=5_02_65 , d_model=10_24 , decoder_layers=12 , decoder_attention_heads=16 , decoder_ffn_dim=40_96 , activation_function="gelu" , max_position_embeddings=5_12 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , decoder_start_token_id=2 , init_std=0.02 , decoder_layerdrop=0.0 , use_cache=True , scale_embedding=False , use_learned_position_embeddings=True , layernorm_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
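

# Hedged usage sketch (added): attribute_map above lets the generic config
# names resolve to the decoder-specific fields.
if __name__ == "__main__":
    _cfg = lowercase()
    assert _cfg.hidden_size == _cfg.d_model == 1024
    assert _cfg.num_hidden_layers == _cfg.decoder_layers == 12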
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model( model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy( p , unlogit=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor( tensor ):
    logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )

    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies' )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info('Head importance scores' )
        print_ad_tensor(head_importance )
    logger.info('Head ranked by importance scores' )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
def mask_heads( args , model , eval_dataloader ):
    _, head_importance, loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f' , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print('BREAK BY num_to_mask' )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)' , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
    logger.info('Final head mask' )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads( args , model , eval_dataloader , head_mask ):
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 1_00 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , score_masking , score_pruning )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
    save_model(model , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the model predictions and checkpoints will be written.' , )
    # Other parameters
    parser.add_argument(
        '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--tokenizer_name' , default='' , type=str , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
    parser.add_argument(
        '--cache_dir' , default=None , type=str , help='Where do you want to store the pre-trained models downloaded from s3' , )
    parser.add_argument(
        '--data_subset' , type=int , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
    parser.add_argument(
        '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
    parser.add_argument(
        '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
    parser.add_argument(
        '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
    parser.add_argument(
        '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Amount to heads to masking at each masking step.' )
    parser.add_argument('--metric_name' , default='acc' , type=str , help='Metric to use for head masking.' )
    parser.add_argument(
        '--max_seq_length' , default=1_28 , type=int , help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ) , )
    parser.add_argument('--batch_size' , default=1 , type=int , help='Batch size.' )
    parser.add_argument('--seed' , type=int , default=42 )
    parser.add_argument('--local_rank' , type=int , default=-1 , help='local_rank for distributed training on gpus' )
    parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
    parser.add_argument('--server_ip' , type=str , default='' , help='Can be used for distant debugging.' )
    parser.add_argument('--server_port' , type=str , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl' )  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
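# Hedged invocation sketch (added, comments only): file names and paths below
# are illustrative placeholders, not from the original script.
#   python run_this_script.py --model_name_or_path gpt2 \
#       --data_dir tokens.txt --output_dir ./pruned --try_masking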
| 652 | 0 |
def A__( a , b ):
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"

    max_len = max(len(a_binary ) , len(b_binary ) )

    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
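    # Hedged sanity check (added): 25 = 0b11001 and 32 = 0b100000, so their
    # bitwise XOR is 0b111001.
    assert A__(25 , 32 ) == '0b111001'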
| 721 |
def A__( __lowerCAmelCase ):
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_string = str(abs(__lowerCAmelCase ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
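    # Hedged sanity check (added): dropping one digit of 123 yields 23, 13, 12.
    assert A__(123 ) == 23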
| 652 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''
        else:
            prefix = 'vit.'
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 10_00
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('tiny' ):
            config.hidden_size = 1_92
            config.intermediate_size = 7_68
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('small' ):
            config.hidden_size = 3_84
            config.intermediate_size = 15_36
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('small' ):
            config.hidden_size = 7_68
            config.intermediate_size = 23_04
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('base' ):
            pass
        elif vit_name[4:].startswith('large' ):
            config.hidden_size = 10_24
            config.intermediate_size = 40_96
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('huge' ):
            config.hidden_size = 12_80
            config.intermediate_size = 51_20
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
lowercase_ : Dict = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER( TokenClassificationTask ):
    """simple docstring"""

    def __init__( self , label_idx=-1 ):
        '''simple docstring'''
        self.label_idx = label_idx
    def read_examples_from_file( self , data_dir , mode : Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples
    def write_predictions_to_file( self , writer : TextIO , test_input_reader : TextIO , preds_list : List ):
        '''simple docstring'''
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
    def get_labels( self , path : str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk( NER ):
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        super().__init__(label_idx=-2 )
    def get_labels( self , path : str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class POS( TokenClassificationTask ):
    """simple docstring"""

    def read_examples_from_file( self , data_dir , mode : Union[Split, str] ):
        '''simple docstring'''
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            for sentence in parse_incr(f ):
                words = []
                labels = []
                for token in sentence:
                    words.append(token['form'] )
                    labels.append(token['upos'] )
                assert len(words ) == len(labels )
                if words:
                    examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                    guid_index += 1
        return examples
    def write_predictions_to_file( self , writer : TextIO , test_input_reader : TextIO , preds_list : List ):
        '''simple docstring'''
        example_id = 0
        for sentence in parse_incr(test_input_reader ):
            s_p = preds_list[example_id]
            out = ''
            for token in sentence:
                out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
            out += "\n"
            writer.write(out )
            example_id += 1
    def get_labels( self , path : str ):
        '''simple docstring'''
        if path:
            with open(path , 'r' ) as f:
                return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
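

# Hedged usage sketch (added): with no label file, the NER task falls back to
# the default CoNLL-2003 label set.
if __name__ == "__main__":
    assert 'B-PER' in NER().get_labels(None )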
| 652 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ : Union[str, Any] = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 111_4112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0Xe_000
SEP = 0Xe_001
BOS = 0Xe_002
MASK = 0Xe_003
RESERVED = 0Xe_004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: '''[CLS]''',
    SEP: '''[SEP]''',
    BOS: '''[BOS]''',
    MASK: '''[MASK]''',
    PAD: '''[PAD]''',
    RESERVED: '''[RESERVED]''',
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class lowercase ( a_ ):
"""simple docstring"""
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=20_48 , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self._unicode_vocab_size

    def _tokenize( self , text : str ):
        '''simple docstring'''
        return list(text )

    def _convert_token_to_id( self , token : str ):
        '''simple docstring'''
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f'''invalid token: \'{token}\'''' )

    def _convert_id_to_token( self , index : int ):
        '''simple docstring'''
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f'''invalid id: {index}''' )

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        return "".join(tokens )

    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        return ()
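

# Hedged usage sketch (added): CANINE tokenizes at the Unicode codepoint
# level, so ordinary character ids are plain ordinals.
if __name__ == "__main__":
    _tok = lowercase()
    assert _tok._convert_token_to_id('a' ) == ord('a' )
    assert _tok._convert_id_to_token(CLS ) == '[CLS]'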
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        return model
@property
    def dummy_vae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
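        # A stand-in feature extractor: it returns an object carrying empty pixel_values,
        # so the pipeline can run without a real safety-checker model.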
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assemble the pipeline components (this test uses the DDIM scheduler)
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
from collections import defaultdict
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Tuple = first_str.lower().strip()
_snake_case : List[Any] = second_str.lower().strip()
# Remove whitespace
_snake_case : Optional[int] = first_str.replace(' ' , '' )
_snake_case : List[Any] = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(lowercase__ ) != len(lowercase__ ):
return False
# Default values for count should be 0
_snake_case : str = defaultdict(lowercase__ )
    # For each character in the input strings, increment the count for the first
    # string and decrement it for the second; anagrams cancel out to all zeros.
for i in range(len(lowercase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase_ : int = input('''Enter the first string ''').strip()
lowercase_ : int = input('''Enter the second string ''').strip()
lowercase_ : List[str] = check_anagrams(input_a, input_b)
print(F'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 702 |
import functools
def A__( __lowerCAmelCase , __lowerCAmelCase ):
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(__lowerCAmelCase ) == 0:
return 0
if min(__lowerCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(__lowerCAmelCase ) >= 3_66:
raise ValueError('All days elements should be less than 366' )
_snake_case : Optional[int] = set(__lowerCAmelCase )
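    # dynamic_programming(index) is the minimum ticket cost needed to cover every
    # travel day on or after `index`; functools.cache memoizes overlapping subproblems.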
@functools.cache
def dynamic_programming(__lowerCAmelCase ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
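        # On a travel day, buy a 1-day, 7-day or 30-day pass and keep the cheapest total.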
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase ( a__ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Tuple = SMALL_MODEL_IDENTIFIER
_snake_case : Optional[int] = '''pt'''
_snake_case : Optional[Any] = '''tf'''
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[str] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowercase__ )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
_snake_case : Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=lowercase__ )
model_tf.save_pretrained(lowercase__ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = '''mock_framework'''
# Framework provided - return whatever the user provides
_snake_case : Tuple = FeaturesManager.determine_framework(self.test_model , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase__ )
_snake_case : Optional[Any] = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase__ )
_snake_case : Tuple = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase__ )
_snake_case : Dict = FeaturesManager.determine_framework(lowercase__ )
self.assertEqual(lowercase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase__ )
_snake_case : Optional[int] = FeaturesManager.determine_framework(lowercase__ )
self.assertEqual(lowercase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowercase__ ):
_snake_case : Union[str, Any] = FeaturesManager.determine_framework(lowercase__ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_snake_case : Optional[Any] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_torch_available' , lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_tf )
# Both in environment -> use PyTorch
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
_snake_case : str = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase__ ):
_snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_pt )
# Both not in environment -> raise error
_snake_case : Dict = MagicMock(return_value=lowercase__ )
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase__ ):
with self.assertRaises(lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 704 |
from math import factorial
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
        raise ValueError('prob has to be in the range 0 - 1' )
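    # P(X = k) = C(n, k) * p^k * (1 - p)^(n - k)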
_snake_case : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_snake_case : List[Any] = float(factorial(__lowerCAmelCase ) )
coefficient /= factorial(__lowerCAmelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def A__( ):
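    # Build a small in-memory dataset with nested features, reused by the arrow-file fixtures below.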
_snake_case : int = 10
_snake_case : Tuple = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
_snake_case : Tuple = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
'id': list(range(a_ ) ),
} , features=a_ , )
return dataset
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=a_ )
return filename
# FILE_CONTENT + files
lowercase_ : Tuple = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : int = tmp_path_factory.mktemp('data' ) / '''file.txt'''
_snake_case : Optional[int] = FILE_CONTENT
with open(a_ , 'w' ) as f:
f.write(a_ )
return filename
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
import bza
_snake_case : List[Any] = tmp_path_factory.mktemp('data' ) / '''file.txt.bz2'''
_snake_case : Any = bytes(a_ , 'utf-8' )
with bza.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
import gzip
_snake_case : Any = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
_snake_case : str = bytes(a_ , 'utf-8' )
with gzip.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
_snake_case : Union[str, Any] = tmp_path_factory.mktemp('data' ) / '''file.txt.lz4'''
_snake_case : List[Any] = bytes(a_ , 'utf-8' )
with lza.frame.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
_snake_case : Dict = tmp_path_factory.mktemp('data' ) / '''file.txt.7z'''
with pyazr.SevenZipFile(a_ , 'w' ) as archive:
archive.write(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
import tarfile
_snake_case : Tuple = tmp_path_factory.mktemp('data' ) / '''file.txt.tar'''
with tarfile.TarFile(a_ , 'w' ) as f:
f.add(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
import lzma
_snake_case : int = tmp_path_factory.mktemp('data' ) / '''file.txt.xz'''
_snake_case : Tuple = bytes(a_ , 'utf-8' )
with lzma.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
import zipfile
_snake_case : Dict = tmp_path_factory.mktemp('data' ) / '''file.txt.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
_snake_case : str = tmp_path_factory.mktemp('data' ) / '''file.txt.zst'''
_snake_case : Tuple = bytes(a_ , 'utf-8' )
with zstd.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : str = tmp_path_factory.mktemp('data' ) / '''file.xml'''
_snake_case : int = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(a_ , 'w' ) as f:
f.write(a_ )
return filename
lowercase_ : Tuple = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowercase_ : List[str] = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowercase_ : Dict = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowercase_ : Tuple = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowercase_ : Dict = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def A__( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : List[str] = datasets.Dataset.from_dict(a_ )
_snake_case : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(a_ ) ) as con:
_snake_case : Union[str, Any] = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(a_ , 'w' , newline='' ) as f:
_snake_case : Optional[int] = csv.DictWriter(a_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(a_ , 'w' , newline='' ) as f:
_snake_case : Dict = csv.DictWriter(a_ , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
import bza
_snake_case : Optional[Any] = tmp_path_factory.mktemp('data' ) / '''dataset.csv.bz2'''
with open(a_ , 'rb' ) as f:
_snake_case : Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(a_ , 'wb' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tmp_path_factory.mktemp('data' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(a_ ) )
f.write(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = tmp_path_factory.mktemp('data' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(a_ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[str] = tmp_path_factory.mktemp('data' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
_snake_case : Any = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(a_ , 'wb' ) as f:
_snake_case : Dict = pq.ParquetWriter(a_ , schema=a_ )
_snake_case : Tuple = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(a_ ) )] for k in DATA[0]} , schema=a_ )
writer.write_table(a_ )
writer.close()
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_snake_case : int = {'''data''': DATA}
with open(a_ , 'w' ) as f:
json.dump(a_ , a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
_snake_case : str = {'''data''': DATA_DICT_OF_LISTS}
with open(a_ , 'w' ) as f:
json.dump(a_ , a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(a_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(a_ , 'w' ) as f:
for item in DATA:
f.write(json.dumps(a_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(a_ , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(a_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(a_ , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(a_ ) + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
import gzip
_snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(a_ , 'rb' ) as orig_file:
with gzip.open(a_ , 'wb' ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
import gzip
_snake_case : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(a_ , 'rb' ) as orig_file:
with gzip.open(a_ , 'wb' ) as zipped_file:
zipped_file.writelines(a_ )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = tmp_path_factory.mktemp('data' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(a_ ) )
f.write(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[str] = tmp_path_factory.mktemp('data' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.join('nested' , os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = tmp_path_factory.mktemp('data' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = tmp_path_factory.mktemp('data' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(a_ , 'w' ) as f:
f.add(a_ , arcname=os.path.basename(a_ ) )
f.add(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = tmp_path_factory.mktemp('data' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(a_ , 'w' ) as f:
f.add(a_ , arcname=os.path.join('nested' , os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : List[str] = ['''0''', '''1''', '''2''', '''3''']
_snake_case : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(a_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[int] = ['''0''', '''1''', '''2''', '''3''']
_snake_case : Any = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(a_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[Any] = ['''0''', '''1''', '''2''', '''3''']
_snake_case : Any = tmp_path_factory.mktemp('data' ) / '''dataset.abc'''
with open(a_ , 'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = tmp_path_factory.mktemp('data' ) / '''dataset.text.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(a_ ) )
f.write(a_ , arcname=os.path.basename(a_ ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : int = tmp_path_factory.mktemp('data' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
f.write(a_ , arcname=os.path.join('main_dir' , os.path.basename(a_ ) ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : int = tmp_path_factory.mktemp('data' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename('unsupported.ext' ) )
f.write(a_ , arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : Optional[int] = '''\n'''.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
_snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(a_ , 'w' , encoding='utf-8' ) as f:
f.write(a_ )
return path
@pytest.fixture(scope='session' )
def A__( ):
return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def A__( ):
return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = tmp_path_factory.mktemp('data' ) / '''dataset.img.zip'''
with zipfile.ZipFile(a_ , 'w' ) as f:
f.write(a_ , arcname=os.path.basename(a_ ) )
f.write(a_ , arcname=os.path.basename(a_ ).replace('.jpg' , '2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def A__( __lowerCAmelCase ):
_snake_case : List[str] = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
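    # Pad (or truncate) each ragged row to the target sequence length, filling with the
    # given padding value on the tokenizer's padding side; entries may be ints or (start, end) pairs.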
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = np.full((len(__lowerCAmelCase ), sequence_length, 2) , __lowerCAmelCase )
else:
_snake_case : List[str] = np.full((len(__lowerCAmelCase ), sequence_length) , __lowerCAmelCase )
for i, tensor in enumerate(__lowerCAmelCase ):
if padding_side == "right":
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = tensor[:sequence_length]
else:
_snake_case : List[Any] = tensor[:sequence_length]
else:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = tensor[:sequence_length]
else:
_snake_case : List[Any] = tensor[:sequence_length]
return out_tensor.tolist()
def A__( __lowerCAmelCase ):
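    # The four ASCII ranges treat symbols such as "^", "$" and "`" as punctuation even
    # though they fall outside the Unicode "P*" categories checked below.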
_snake_case : str = ord(__lowerCAmelCase )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
_snake_case : Tuple = unicodedata.category(__lowerCAmelCase )
if cat.startswith('P' ):
return True
return False
@dataclass
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : PreTrainedTokenizerBase
_UpperCamelCase : Union[bool, str, PaddingStrategy] = True
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : int = -100
_UpperCamelCase : str = "pt"
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Dict ):
'''simple docstring'''
import torch
_snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
_snake_case : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_snake_case : Dict = self.tokenizer.pad(
lowerCamelCase_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
_snake_case : int = torch.tensor(batch['entity_ids'] ).shape[1]
_snake_case : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
_snake_case : List[str] = [
list(lowerCamelCase_ ) + [self.label_pad_token_id] * (sequence_length - len(lowerCamelCase_ )) for label in labels
]
else:
_snake_case : Optional[Any] = [
[self.label_pad_token_id] * (sequence_length - len(lowerCamelCase_ )) + list(lowerCamelCase_ ) for label in labels
]
_snake_case : Tuple = [feature['ner_tags'] for feature in features]
_snake_case : Dict = padding_tensor(lowerCamelCase_ , -1 , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = [feature['original_entity_spans'] for feature in features]
_snake_case : Union[str, Any] = padding_tensor(lowerCamelCase_ , (-1, -1) , lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[int] = {k: torch.tensor(lowerCamelCase_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
def A__( ):
return [list(range(10_00 - i , -10_00 - i , -1 ) ) for i in range(10_00 )]
lowercase_ : List[str] = generate_large_matrix()
lowercase_ : Optional[Any] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def A__( __lowerCAmelCase ):
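    # Sanity-check that every row and every column is sorted in non-increasing order.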
assert all(row == sorted(lowercase_ , reverse=lowercase_ ) for row in grid )
assert all(list(lowercase_ ) == sorted(lowercase_ , reverse=lowercase_ ) for col in zip(*lowercase_ ) )
def A__( __lowerCAmelCase ):
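    # Binary search for the index of the first negative value in a row sorted in decreasing order.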
_snake_case : Optional[Any] = 0
_snake_case : str = len(lowercase_ ) - 1
    # Edge cases: the array is empty, or every number is negative (first negative index is 0).
if not array or array[0] < 0:
return 0
while right + 1 > left:
_snake_case : Dict = (left + right) // 2
_snake_case : str = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
_snake_case : Optional[Any] = mid + 1
else:
_snake_case : List[Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase_ )
def A__( __lowerCAmelCase ):
_snake_case : Any = 0
_snake_case : List[str] = len(grid[0] )
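    # In a grid sorted in non-increasing order, each row's first-negative index can only
    # move left, so the previous row's bound shrinks the next binary search.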
for i in range(len(lowercase_ ) ):
_snake_case : List[str] = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase_ ) * len(grid[0] )) - total
def A__( __lowerCAmelCase ):
return len([number for row in grid for number in row if number < 0] )
def A__( __lowerCAmelCase ):
_snake_case : List[Any] = 0
for row in grid:
for i, number in enumerate(lowercase_ ):
if number < 0:
total += len(lowercase_ ) - i
break
return total
def A__( ):
from timeit import timeit
print('Running benchmarks' )
_snake_case : List[str] = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
_snake_case : List[str] = timeit(F'''{func}(grid=grid)''' , setup=lowercase_ , number=5_00 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : List[str] = logging.get_logger(__name__)
lowercase_ : int = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCamelCase : Dict = "trocr"
_UpperCamelCase : List[Any] = ["past_key_values"]
_UpperCamelCase : List[Any] = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : Tuple , lowerCamelCase_ : Any=5_02_65 , lowerCamelCase_ : Optional[Any]=10_24 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : str=40_96 , lowerCamelCase_ : List[str]="gelu" , lowerCamelCase_ : List[Any]=5_12 , lowerCamelCase_ : Dict=0.1 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]=1 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : Union[str, Any]=2 , **lowerCamelCase_ : Optional[int] , ):
'''simple docstring'''
_snake_case : Dict = vocab_size
_snake_case : int = d_model
_snake_case : Any = decoder_layers
_snake_case : List[Any] = decoder_attention_heads
_snake_case : str = decoder_ffn_dim
_snake_case : Optional[int] = activation_function
_snake_case : Tuple = max_position_embeddings
_snake_case : Dict = dropout
_snake_case : str = attention_dropout
_snake_case : Optional[int] = activation_dropout
_snake_case : List[str] = init_std
_snake_case : Dict = decoder_layerdrop
_snake_case : Dict = use_cache
_snake_case : int = scale_embedding
_snake_case : str = use_learned_position_embeddings
_snake_case : List[Any] = layernorm_embedding
super().__init__(
pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , decoder_start_token_id=_snake_case , **_snake_case , )
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase_ : Optional[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_dataset(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : str = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
inspect_metric(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Tuple = path + '.py'
assert script_name in os.listdir(__lowerCAmelCase )
assert "__pycache__" not in os.listdir(__lowerCAmelCase )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Union[str, Any] = get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_config_info(__lowerCAmelCase , config_name=__lowerCAmelCase )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[Any] = get_dataset_config_names(__lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = get_dataset_infos(__lowerCAmelCase )
assert list(infos.keys() ) == expected_configs
_snake_case : Any = expected_configs[0]
assert expected_config in infos
_snake_case : str = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Any = get_dataset_infos(__lowerCAmelCase )
assert expected_config in infos
_snake_case : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with pytest.raises(__lowerCAmelCase ):
get_dataset_split_names(__lowerCAmelCase , config_name=__lowerCAmelCase )
| 652 | 0 |
'''simple docstring'''
lowercase_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
lowercase_ : dict[str, float] = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
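    # Convert via km/h as the pivot: first to km/h with speed_chart, then to the
    # target unit with speed_chart_inverse.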
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
_snake_case : Union[str, Any] = (
F'''Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n'''
F'''Valid values are: {', '.join(__lowerCAmelCase )}'''
)
raise ValueError(__lowerCAmelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
# Initialise PyTorch model
_snake_case : Optional[int] = BertConfig.from_json_file(__lowerCAmelCase )
print(F'''Building PyTorch model from configuration: {config}''' )
_snake_case : List[str] = BertForPreTraining(__lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowerCAmelCase )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 652 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def A__( __lowerCAmelCase ):
_snake_case : List[Any] = split_dict._to_yaml_list()
assert len(snake_case_ ) == len(snake_case_ )
_snake_case : Tuple = SplitDict._from_yaml_list(snake_case_ )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_snake_case : Optional[Any] = None
# the split name of split_dict takes over the name of the split info object
_snake_case : List[Any] = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'split_info' , [SplitInfo(), SplitInfo(dataset_name=snake_case_ ), SplitInfo(dataset_name='my_dataset' )] )
def A__( __lowerCAmelCase ):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
_snake_case : List[str] = asdict(SplitDict({'train': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 710 |
import itertools
import math
def A__( __lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def A__( ):
_snake_case : Optional[Any] = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def A__( __lowerCAmelCase = 1_00_01 ):
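    # islice skips the first nth - 1 primes, so next() yields the nth prime.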
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
lowercase_ : Any = logging.get_logger(__name__)
lowercase_ : List[Any] = 'Hello, World!'
lowercase_ : str = 'en_XX'
def convert_xmod_checkpoint_to_pytorch( xmod_checkpoint_path , pytorch_dump_folder_path , classification_head ):
    data_dir = Path('data_bin' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(data_dir ) , bpe='sentencepiece' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
    xmod.eval() # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our X-MOD config:' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
        self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('Dimensions of intermediate weights do not match.' )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
# output
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('Dimensions of feed-forward weights do not match.' )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.fc1.weight = from_adapter.fc1.weight
            to_adapter.fc1.bias = from_adapter.fc1.bias
            to_adapter.fc2.weight = from_adapter.fc2.weight
            to_adapter.fc2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['mnli'](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1e-3 )
    print('Do both models output the same tensors?' , '🔥' if success else '💩' )
    if not success:
        raise Exception('Something went wRoNg' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles( path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir( tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class TestAll(TestCasePlus ):
    """simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation( self , tok_name ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len # Will be truncated
        assert max_len_source > max_src_len # Will be truncated
        src_lang, tgt_lang = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='train' , max_source_length=max_src_len , max_target_length=max_tgt_len , src_lang=src_lang , tgt_lang=tgt_lang , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert isinstance(batch , dict )
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch['decoder_input_ids'] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation( self , tok_name ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name )
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
        max_len_source = max(len(tokenizer.encode(a ) ) for a in ARTICLES )
        max_len_target = max(len(tokenizer.encode(a ) ) for a in SUMMARIES )
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='train' , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn )
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target # Truncated
            assert max_len_target > trunc_target # Truncated
            break # No need to test every batch
    def test_pack_dataset( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        orig_examples = tmp_dir.joinpath('train.source' ).open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
        pack_data_dir(tokenizer , tmp_dir , 1_28 , save_dir )
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('train.source' ).open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples ) < len(orig_examples )
        assert len(packed_examples ) == 1
        assert len(packed_examples[0] ) == sum(len(x ) for x in orig_examples )
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
    def test_dynamic_batch_size( self ):
        '''simple docstring'''
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64 )
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple )
        batch_sizes = [len(x ) for x in batch_sampler]
        assert len(set(batch_sizes ) ) > 1 # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes ) == len(ds ) # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2 )
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape )
            num_src_per_batch.append(num_src_tokens )
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens )
        assert num_src_per_batch[0] == max(num_src_per_batch )
        if failures:
            raise AssertionError(f'''too many tokens in {len(failures )} batches''' )
    def test_sortish_sampler_reduces_padding( self ):
        '''simple docstring'''
        ds, _, tokenizer = self._get_dataset(max_len=5_12 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k="input_ids" ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='labels' ) ) < sum(count_pad_tokens(naive_dl , k='labels' ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=10_00 , max_len=1_28 ):
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA' , False ):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath('train.len' ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = Seq2SeqDataset(
            tokenizer , data_dir=data_dir , type_path='train' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs( self ):
        '''simple docstring'''
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=False ) )
        ids2 = set(DistributedSortishSampler(ds , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=False ) )
        assert ids1.intersection(ids2 ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs( self , tok_name ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False )
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs ) == 1 if tok_name == BART_TINY else len(kwargs ) == 0
| 652 | 0 |
def largest_square_area_in_matrix_top_down_approach( rows , cols , mat ):
    def update_area_of_max_square(row , col ) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0
        right = update_area_of_max_square(row , col + 1 )
        diagonal = update_area_of_max_square(row + 1 , col + 1 )
        down = update_area_of_max_square(row + 1 , col )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    update_area_of_max_square(0 , 0 )
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp( rows , cols , mat ):
    def update_area_of_max_square_using_dp_array(
        row , col , dp_array ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]
        right = update_area_of_max_square_using_dp_array(row , col + 1 , dp_array )
        diagonal = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , dp_array )
        down = update_area_of_max_square_using_dp_array(row + 1 , col , dp_array )
        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down] )
            largest_square_area[0] = max(largest_square_area[0] , sub_problem_sol )
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0
    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows )]
    update_area_of_max_square_using_dp_array(0 , 0 , dp_array )
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up( rows , cols , mat ):
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1 )]
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(dp_array[row][col] , largest_square_area )
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization( rows , cols , mat ):
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1 , -1 , -1 ):
        for col in range(cols - 1 , -1 , -1 ):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right , diagonal , bottom )
                largest_square_area = max(current_row[col] , largest_square_area )
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
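    # Hedged usage sketch: the other three variants should agree on the same input.
    demo = [[1, 1], [1, 1]]
    assert largest_square_area_in_matrix_top_down_approach(2 , 2 , demo ) == 2
    assert largest_square_area_in_matrix_top_down_approach_with_dp(2 , 2 , demo ) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(2 , 2 , demo ) == 2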
| 712 |
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ):
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
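    # Hedged usage sketch: three items worth (60, 100, 120) weighing (10, 20, 30)
    # with capacity 50 yield the classic optimum of 240.
    best, taken = fractional_knapsack([60, 100, 120] , [10, 20, 30] , 50 )
    print(best , taken )  # 240.0 [1, 1, 0.666...]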
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xlm_roberta_xl": [
"XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaXLConfig",
"XLMRobertaXLOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta_xl'] = [
"XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaXLForCausalLM",
"XLMRobertaXLForMaskedLM",
"XLMRobertaXLForMultipleChoice",
"XLMRobertaXLForQuestionAnswering",
"XLMRobertaXLForSequenceClassification",
"XLMRobertaXLForTokenClassification",
"XLMRobertaXLModel",
"XLMRobertaXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    """simple docstring"""
    csv_file : str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch : bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time : bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale : bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train : bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file : Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names : Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int( value ):
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float( value ):
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
    """simple docstring"""
    def __init__( self , args ):
        '''simple docstring'''
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline='' ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row['model']
                self.result_dict[model_name]["bsz"].append(int(row['batch_size'] ) )
                self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'] ) )
                if can_convert_to_int(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = int(row['result'] )
                elif can_convert_to_float(row['result'] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row['batch_size'] ), int(row['sequence_length'] ))
                    ] = float(row['result'] )
    def plot( self ):
        '''simple docstring'''
        fig, ax = plt.subplots()
        title_str = 'Time usage' if self.args.is_time else 'Memory usage'
        title_str = title_str + ' for training' if self.args.is_train else title_str + ' for inference'
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale('log' )
            ax.set_yscale('log' )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]['bsz'] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]['seq_len'] ) )
            results = self.result_dict[model_name]['result']
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz')
                )
                x_axis_array_trimmed = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_array_trimmed , y_axis_array , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_array_trimmed , y_axis_array , '--' )
                title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = 'Time in s' if self.args.is_time else 'Memory in MB'
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 714 |
import math
def is_prime( number ):
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( nth = 1_00_01 ):
    try:
        nth = int(nth )
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.' ) from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.' )
    primes : list[int] = []
    num = 2
    while len(primes ) < nth:
        if is_prime(num ):
            primes.append(num )
            num += 1
        else:
            num += 1
    return primes[len(primes ) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
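    # Hedged sanity check: the 6th prime is 13.
    assert solution(6 ) == 13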
| 652 | 0 |
demo_graph = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
def bfs_shortest_path( graph , start , goal ):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph , start , target ):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 715 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(lowerCamelCase_ , self ).__init__()
_snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : str = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return self.bert(**lowerCamelCase_ ).last_hidden_state
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
    def forward( self , W_query , W_supports ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
| 652 | 0 |
def solution( n = 10_00 ):
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
        if candidate >= product:
            product = candidate
    return product
if __name__ == "__main__":
print(F'''{solution() = }''')
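    # Hedged check: for a + b + c = 12 the triplet is (3, 4, 5), product 60.
    assert solution(12 ) == 60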
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , max_length: int , vocab_size: int , d_model: int , dropout_rate: float , num_layers: int , num_heads: int , d_kv: int , d_ff: int , feed_forward_proj: str , is_decoder: bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = T5Config(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = T5Block(t5config )
            self.encoders.append(lyr )
        self.layer_norm = T5LayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
| 652 | 0 |
'''simple docstring'''
def present_value( discount_rate , cash_flows ):
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
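    # Hedged usage sketch: an initial outlay of 100 followed by a single 110 inflow
    # discounted at 10% nets out to zero.
    print(present_value(0.10 , [-100, 110] ) )  # 0.0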
| 717 |
def excel_title_to_column( column_title ):
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
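    # Hedged check: "AB" is the 28th column (26 * 1 + 2).
    assert excel_title_to_column('AB' ) == 28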
| 652 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
    @register_to_config
    def __init__( self , prefix_length: int , prefix_inner_dim: int , prefix_hidden_dim: Optional[int] = None , vocab_size: int = 5_02_57 , n_positions: int = 10_24 , n_embd: int = 7_68 , n_layer: int = 12 , n_head: int = 12 , n_inner: Optional[int] = None , activation_function: str = "gelu_new" , resid_pdrop: float = 0.1 , embd_pdrop: float = 0.1 , attn_pdrop: float = 0.1 , layer_norm_epsilon: float = 1e-5 , initializer_range: float = 0.02 , scale_attn_weights: bool = True , use_cache: bool = True , scale_attn_by_inverse_layer_idx: bool = False , reorder_and_upcast_attn: bool = False , ):
        '''simple docstring'''
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.''' )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config )
    def forward( self , input_ids: torch.Tensor , prefix_embeds: torch.Tensor , attention_mask: Optional[torch.Tensor] = None , labels: Optional[torch.Tensor] = None , ):
        '''simple docstring'''
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size: int , device: torch.device ):
        '''simple docstring'''
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device )
    def encode( self , prefix ):
        '''simple docstring'''
        return self.encode_prefix(prefix )
    @torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ):
        '''simple docstring'''
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) ) # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam( self , input_ids=None , input_embeds=None , device=None , beam_size: int = 5 , entry_length: int = 67 , temperature: float = 1.0 , eos_token_id: Optional[int] = None , ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens, scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute( self , predictions , references , normalized: bool = False , ignore_punct: bool = False , support_zh_ja_chars: bool = False , case_sensitive: bool = False , ):
        '''simple docstring'''
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments ):
    """simple docstring"""
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]
    def __init__( self , **kwargs ):
        '''simple docstring'''
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg )
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no-{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''' )
        self.tpu_name = kwargs.pop('tpu_name' , self.tpu_name )
        self.device_idx = kwargs.pop('device_idx' , self.device_idx )
        self.eager_mode = kwargs.pop('eager_mode' , self.eager_mode )
        self.use_xla = kwargs.pop('use_xla' , self.use_xla )
        super().__init__(**kwargs )
    tpu_name : str = field(
        default=None , metadata={"help": "Name of TPU"} , )
    device_idx : int = field(
        default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
    eager_mode : bool = field(default=False , metadata={"help": "Benchmark models in eager model."} )
    use_xla : bool = field(
        default=False , metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        } , )
    @cached_property
    def _setup_tpu( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu
    @cached_property
    def _setup_strategy( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu )
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
            strategy = tf.distribute.TPUStrategy(self._setup_tpu )
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/gpu:{self.device_idx}''' )
            else:
                tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f'''/cpu:{self.device_idx}''' )
        return strategy
    @property
    def is_tpu( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        return self._setup_tpu is not None
    @property
    def strategy( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        return self._setup_strategy
    @property
    def gpu_list( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        return tf.config.list_physical_devices('GPU' )
    @property
    def n_gpu( self ):
        '''simple docstring'''
        requires_backends(self , ['tf'] )
        if self.cuda:
            return len(self.gpu_list )
        return 0
    @property
    def is_gpu( self ):
        '''simple docstring'''
        return self.n_gpu > 0
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    # compile regexes and force complete match; return True if the regexes in qs
    # match any window of strings in the tuple ks
    qts = tuple((re.compile(x + '$' ) for x in qs) )
    for i in range(len(ks ) - len(qts ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def A__( ):
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __lowerCAmelCase )),
(("transformer", "wte", "embedding"), P('mp' , __lowerCAmelCase )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__lowerCAmelCase , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __lowerCAmelCase )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__( __lowerCAmelCase ):
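    # Map every flattened parameter key to the PartitionSpec of the first matching rule, then re-freeze the tree.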
_snake_case : Optional[Any] = _get_partition_rules()
_snake_case : Optional[Any] = _replacement_rules(__lowerCAmelCase )
_snake_case : str = {k: _unmatched for k in flatten_dict(__lowerCAmelCase )}
_snake_case : str = {k: replace(__lowerCAmelCase , __lowerCAmelCase ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(__lowerCAmelCase ) )
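# Usage sketch (names assumed): given a Flax GPT-2 params pytree,
#   spec = A__(params)
# returns a frozen pytree of PartitionSpecs aligned with the weights, suitable for pjit sharding.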
| 652 | 0 |
from __future__ import annotations
from math import pi
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
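    # Inductive reactance: X_L = 2 * pi * f * L. Exactly one argument must be 0; it is solved for from the other two.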
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
lowercase_ : Any = logging.getLogger(__name__)
def A__( __lowerCAmelCase , __lowerCAmelCase ):
    # save the model, removing any stale config.json / pytorch_model.bin first
if os.path.exists(__lowerCAmelCase ):
if os.path.exists(os.path.join(__lowerCAmelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'config.json' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'config.json' ) )
if os.path.exists(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(__lowerCAmelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
def A__( __lowerCAmelCase , __lowerCAmelCase=False ):
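    # Shannon entropy over the last dimension, -sum(p * log p); with unlogit=True the inputs are squared first.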
_snake_case : int = 2
if unlogit:
_snake_case : Dict = torch.pow(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : Optional[int] = p * torch.log(__lowerCAmelCase )
_snake_case : List[str] = 0
return -plogp.sum(dim=-1 )
def A__( __lowerCAmelCase ):
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(__lowerCAmelCase ) ) ) )
for row in range(len(__lowerCAmelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
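    # Accumulate per-head attention entropy and gradient-based head importance across the whole dataloader.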
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
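    # Iteratively mask the least important heads until the LM score falls below masking_threshold * original score.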
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
    _snake_case : List[str] = 1 / loss # instead of the downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
        'Masking: current score: %f, remaining heads %d (%.1f percent)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
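    # Physically prune the previously masked heads, then re-score and compare parameter counts and wall-clock time.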
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 652 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowercase_ : Dict = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ):
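    # Build the TF model from its config, load the PyTorch weights into it, optionally diff the two outputs, and save as .h5.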
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
_snake_case , _snake_case , _snake_case , _snake_case : Dict = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
_snake_case : Tuple = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , force_download=not use_cached_models )
_snake_case : Tuple = config_class.from_json_file(lowerCAmelCase__ )
_snake_case : Optional[int] = True
_snake_case : Any = True
print(F'''Building TensorFlow model from configuration: {config}''' )
_snake_case : Optional[Any] = model_class(lowerCAmelCase__ )
    # Resolve the PyTorch checkpoint path (download it when given as a shortcut name)
if pytorch_checkpoint_path in aws_config_map.keys():
_snake_case : List[Any] = cached_file(
lowerCAmelCase__ , lowerCAmelCase__ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
_snake_case : Union[str, Any] = load_pytorch_checkpoint_in_tfa_model(lowerCAmelCase__ , lowerCAmelCase__ )
if compare_with_pt_model:
_snake_case : str = tf_model(tf_model.dummy_inputs , training=lowerCAmelCase__ ) # build the network
_snake_case : List[Any] = torch.load(lowerCAmelCase__ , map_location='cpu' )
_snake_case : Dict = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowerCAmelCase__ , config=lowerCAmelCase__ , state_dict=lowerCAmelCase__ )
with torch.no_grad():
_snake_case : Dict = pt_model(**pt_model.dummy_inputs )
_snake_case : Dict = pto[0].numpy()
_snake_case : List[str] = tfo[0].numpy()
_snake_case : Any = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
    # Save the TensorFlow model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(lowerCAmelCase__ , save_format='h5' )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ):
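    # Iterate over the selected model types and checkpoint shortcuts, converting each checkpoint in turn.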
if args_model_type is None:
_snake_case : List[str] = list(MODEL_CLASSES.keys() )
else:
_snake_case : Optional[Any] = [args_model_type]
for j, model_type in enumerate(lowerCAmelCase__ , start=1 ):
print('=' * 1_00 )
print(F''' Converting model type {j}/{len(lowerCAmelCase__ )}: {model_type}''' )
print('=' * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : int = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
_snake_case : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
_snake_case : Any = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(lowerCAmelCase__ , lowerCAmelCase__ ) , start=1 ):
print('-' * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
_snake_case : List[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(lowerCAmelCase__ )}: {model_shortcut_name} - model_type {model_type}''' )
print('-' * 1_00 )
if config_shortcut_name in aws_config_map:
_snake_case : List[str] = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , force_download=not use_cached_models )
else:
_snake_case : Tuple = config_shortcut_name
if model_shortcut_name in aws_model_maps:
_snake_case : Any = cached_file(lowerCAmelCase__ , lowerCAmelCase__ , force_download=not use_cached_models )
else:
_snake_case : Any = model_shortcut_name
if os.path.isfile(lowerCAmelCase__ ):
_snake_case : Dict = 'converted_model'
convert_pt_checkpoint_to_tf(
model_type=lowerCAmelCase__ , pytorch_checkpoint_path=lowerCAmelCase__ , config_file=lowerCAmelCase__ , tf_dump_path=os.path.join(lowerCAmelCase__ , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=lowerCAmelCase__ , )
if remove_cached_files:
os.remove(lowerCAmelCase__ )
os.remove(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
            '''use the configuration associated with the shortcut name on AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
lowercase_ : str = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 721 |
def A__( __lowerCAmelCase ):
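    # Return the largest value obtainable by deleting exactly one digit from the input number.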
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        _snake_case : Any = str(abs(__lowerCAmelCase ) )
        _snake_case : List[str] = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 652 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Dict = IFInpaintingSuperResolutionPipeline
_UpperCamelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
_UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} )
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"}
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self._get_superresolution_dummy_components()
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : Dict=0 ):
'''simple docstring'''
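        # Deterministic dummy inputs: a seeded generator plus random 16x16 image and 32x32 original/mask tensors.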
if str(UpperCamelCase__ ).startswith('mps' ):
_snake_case : str = torch.manual_seed(UpperCamelCase__ )
else:
_snake_case : Optional[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
_snake_case : List[Any] = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_snake_case : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_snake_case : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
_snake_case : List[str] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
self._test_save_load_local()
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
_snake_case : str = label_idx
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
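        # Parse CoNLL-style "token label ..." lines; blank lines and -DOCSTART- markers delimit examples.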
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
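        # Parse CoNLL-U sentences with conllu.parse_incr, taking "form" as the token and "upos" as the label.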
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class lowercase :
"""simple docstring"""
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : torch.Tensor # [batch_size x 3]
_UpperCamelCase : int
_UpperCamelCase : int
_UpperCamelCase : float
_UpperCamelCase : float
_UpperCamelCase : Tuple[int]
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Tuple = torch.arange(self.height * self.width )
_snake_case : List[str] = torch.stack(
[
pixel_indices % self.width,
torch.div(lowerCamelCase_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , *_snake_case : Union[str, Any] = self.shape
_snake_case : Any = int(np.prod(lowerCamelCase_ ) )
_snake_case : Union[str, Any] = self.get_image_coords()
_snake_case : Any = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_snake_case : Tuple = self.get_camera_rays(lowerCamelCase_ )
_snake_case : Optional[int] = rays.view(lowerCamelCase_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
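        # Map pixel coordinates to [-1, 1], scale by tan(fov / 2), combine the camera axes, and normalize the ray directions.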
_snake_case , *_snake_case , _snake_case : int = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_snake_case : List[str] = coords.view(lowerCamelCase_ , -1 , 2 )
_snake_case : Union[str, Any] = self.resolution()
_snake_case : Union[str, Any] = self.fov()
_snake_case : Optional[Any] = (flat.float() / (res - 1)) * 2 - 1
_snake_case : List[Any] = fracs * torch.tan(fov / 2 )
_snake_case : str = fracs.view(lowerCamelCase_ , -1 , 2 )
_snake_case : Union[str, Any] = (
self.z.view(lowerCamelCase_ , 1 , 3 )
+ self.x.view(lowerCamelCase_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(lowerCamelCase_ , 1 , 3 ) * fracs[:, :, 1:]
)
_snake_case : Optional[Any] = directions / directions.norm(dim=-1 , keepdim=lowerCamelCase_ )
_snake_case : List[Any] = torch.stack(
[
torch.broadcast_to(self.origin.view(lowerCamelCase_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(lowerCamelCase_ , *lowerCamelCase_ , 2 , 3 )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] ):
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=lowerCamelCase_ , height=lowerCamelCase_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def A__( __lowerCAmelCase ):
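    # Build 20 camera poses on a ring at distance 4 from the origin, each looking inward and slightly downward.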
_snake_case : List[str] = []
_snake_case : str = []
_snake_case : Optional[Any] = []
_snake_case : int = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_snake_case : List[Any] = np.array([np.sin(__lowerCAmelCase ), np.cos(__lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_snake_case : Any = -z * 4
_snake_case : Union[str, Any] = np.array([np.cos(__lowerCAmelCase ), -np.sin(__lowerCAmelCase ), 0.0] )
_snake_case : List[str] = np.cross(__lowerCAmelCase , __lowerCAmelCase )
origins.append(__lowerCAmelCase )
xs.append(__lowerCAmelCase )
ys.append(__lowerCAmelCase )
zs.append(__lowerCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(__lowerCAmelCase , axis=0 ) ).float() , width=__lowerCAmelCase , height=__lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(__lowerCAmelCase )) , )
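# Usage sketch (argument assumed): calling the pan-camera helper above with size=64 yields a
# 20-pose orbit whose camera-rays property gives per-pixel origin/direction pairs for turntable renders.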
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assemble the pipeline with the DDIM scheduler configured above
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def __UpperCAmelCase ( self : Tuple ):
        '''simple docstring'''
        sd_pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 1044355234
        guidance_scale = 12
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=0 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
        generator = torch.manual_seed(seed )
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=guidance_scale , num_inference_steps=50 , output_type='np' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
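# A minimal usage sketch of the safe-latent-diffusion knobs exercised by the tests above.
# It assumes the same `StableDiffusionPipeline` entry point (which accepts the sld_* kwargs
# in these tests) and a CUDA device; model id, prompt, and seed are illustrative only.
#
#     def _example_safe_generation():
#         pipe = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
#         pipe = pipe.to('cuda' )
#         out = pipe(
#             ['a portrait photo'] , generator=torch.manual_seed(0 ) , guidance_scale=7 ,
#             num_inference_steps=50 , output_type='np' ,
#             sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 ,
#             sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
#         return out.images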
| 652 | 0 |
import argparse
import copy
def generate_neighbours(path ):
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours


def generate_first_solution(path , dict_of_neighbours ):
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution


def find_neighborhood(solution , dict_of_neighbours ):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution


def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost


def main(args=None ):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
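# Illustrative input for the script above: an undirected weighted edge list with one edge
# per line as "node_a node_b distance". Node names must be single characters, since
# generate_first_solution reads the start node with f.read(1). File name and values below
# are made up.
#
#   a b 20
#   a c 18
#   b c 10
#
# Example invocation (hypothetical paths/values):
#   python tabu_search.py -f cities.txt -i 100 -s 5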
| 702 |
import functools
def mincost_tickets(days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 366:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )

    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )

    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
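# Illustrative call (the classic LeetCode 983 example): travel days [1, 4, 6, 7, 8, 20]
# with pass costs [2, 7, 15] come out to 11 (a 7-day pass covering days 1-7, then two
# single-day tickets).
#   print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # -> 11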
| 652 | 0 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('KEY')
VAL = TypeVar('VAL')


@dataclass(frozen=True , slots=True )
class _Item(Generic[KEY, VAL] ):
    """simple docstring"""

    key: KEY
    val: VAL


class _DeletedItem(_Item ):
    """simple docstring"""

    def __init__( self ):
        '''simple docstring'''
        super().__init__(None , None )

    def __bool__( self ):
        '''simple docstring'''
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL] ):
    """simple docstring"""

    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.75 ):
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key: KEY ):
        '''simple docstring'''
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind: int ):
        '''simple docstring'''
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind: int , key: KEY , val: VAL ):
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ):
        '''simple docstring'''
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ):
        '''simple docstring'''
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size: int ):
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ):
        '''simple docstring'''
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key: KEY ):
        '''simple docstring'''
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key: KEY , val: VAL ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key: KEY , val: VAL ):
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key: KEY ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key: KEY ):
        '''simple docstring'''
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ):
        '''simple docstring'''
        return self._len

    def __iter__( self ):
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ):
        '''simple docstring'''
        val_string = ' ,'.join(
            f'''{item.key}: {item.val}''' for item in self._buckets if item )
        return f'''HashMap({val_string})'''
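# A small self-check sketch for the map above (not part of the original module); it only
# exercises the public MutableMapping surface.
if __name__ == "__main__":
    hm = HashMap(initial_block_size=4 )
    hm['one'] = 1
    hm['two'] = 2
    hm['one'] = 11  # same key overwrites in place, length stays the same
    assert len(hm ) == 2 and hm['one'] == 11
    del hm['two']
    assert 'two' not in hm  # MutableMapping gives us __contains__ for free
    print(hm )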
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 652 | 0 |
import requests
def send_slack_message(message_body: str , slack_url: str ) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            f'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
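# Hypothetical example (the webhook URL below is a placeholder, not a real endpoint):
#   send_slack_message('Deploy finished', 'https://hooks.slack.com/services/T000/B000/XXXX')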
| 704 |
from math import factorial
def binomial_distribution(successes: int , trials: int , prob: float ) -> float:
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
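# Sanity-check sketch: summing the distribution over every possible success count should
# give ~1.0 (a property of any probability mass function).
#   print(sum(binomial_distribution(k, 10, 0.3) for k in range(11)))  # ~1.0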
| 652 | 0 |
def knapsack(weights: list , values: list , number_of_items: int , max_weight: int , index: int ) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
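# Illustrative call (weights/values are made up): with capacity 10 the best choice is
# items 0, 1 and 3 (weights 2+3+5, values 3+7+9), for a total value of 19.
#   print(knapsack([2, 3, 4, 5], [3, 7, 2, 9], 4, 10, 0))  # -> 19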
| 705 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''

import argparse
from argparse import Namespace

import torch
from torch import nn

from transformers import XGLMConfig, XGLMForCausalLM


def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        'decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    checkpoint = torch.load(checkpoint_path , map_location='cpu' )
    args = Namespace(**checkpoint['cfg']['model'] )
    state_dict = checkpoint['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]
    state_dict = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
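# Example invocation (script name and paths are placeholders):
#   python convert_xglm_original_ckpt_to_trfms.py /path/to/model.pt ./xglm-dump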
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """simple docstring"""

    def put( self , value ):
        '''simple docstring'''
        raise NotImplementedError()

    def end( self ):
        '''simple docstring'''
        raise NotImplementedError()


class TextStreamer(BaseStreamer ):
    """simple docstring"""

    def __init__( self , tokenizer: 'AutoTokenizer' , skip_prompt: bool = False , **decode_kwargs ):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put( self , value ):
        '''simple docstring'''
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1' )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(' ' ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )

    def end( self ):
        '''simple docstring'''
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text( self , text: str , stream_end: bool = False ):
        '''simple docstring'''
        print(text , flush=True , end='' if not stream_end else None )

    def _is_chinese_char( self , cp ):
        '''simple docstring'''
        # Checks whether the code point belongs to a CJK Unicode block.
        if (
            (cp >= 0x4_e00 and cp <= 0x9_fff)
            or (cp >= 0x3_400 and cp <= 0x4_dbf)  #
            or (cp >= 0x20_000 and cp <= 0x2a_6df)  #
            or (cp >= 0x2a_700 and cp <= 0x2b_73f)  #
            or (cp >= 0x2b_740 and cp <= 0x2b_81f)  #
            or (cp >= 0x2b_820 and cp <= 0x2c_eaf)  #
            or (cp >= 0xf_900 and cp <= 0xf_aff)
            or (cp >= 0x2f_800 and cp <= 0x2f_a1f)  #
        ):  #
            return True
        return False


class TextIteratorStreamer(TextStreamer ):
    """simple docstring"""

    def __init__( self , tokenizer: 'AutoTokenizer' , skip_prompt: bool = False , timeout: Optional[float] = None , **decode_kwargs ):
        '''simple docstring'''
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text( self , text: str , stream_end: bool = False ):
        '''simple docstring'''
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self ):
        '''simple docstring'''
        return self

    def __next__( self ):
        '''simple docstring'''
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
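# Minimal usage sketch for the iterator streamer above (model/tokenizer names are
# assumptions; any causal LM works). generate() runs in a background thread while the
# main thread consumes decoded text chunks as they become available.
#
#     from threading import Thread
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tok = AutoTokenizer.from_pretrained('gpt2')
#     model = AutoModelForCausalLM.from_pretrained('gpt2')
#     inputs = tok(['An increasing sequence: one,'], return_tensors='pt')
#     streamer = TextIteratorStreamer(tok, skip_prompt=True)
#     thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#     thread.start()
#     for new_text in streamer:
#         print(new_text, end='')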
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )


@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )


@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_config_info(path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_config_info_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )


@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def test_get_dataset_config_names(path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names


@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_infos(path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config


@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_info(path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits


@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_split_names_error(path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[int] = logging.get_logger(__name__)
lowercase_ : Any = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'''=> File names {file_names}''' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'''=> removing {file_name}''' )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
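# Example invocation (values are placeholders; tokenizer names are the slow-class keys of
# SLOW_TO_FAST_CONVERTERS, e.g. "BertTokenizer"):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast-tokenizers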
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(F'''Building PyTorch model from configuration: {config}''' )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
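# Example invocation (paths are placeholders):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin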
| 652 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )

    def test_token_type_ids( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
| 710 |
import itertools
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth: int = 10001 ) -> int:
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
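# The generator composes with itertools for other queries too, e.g. the first ten primes:
#   print(list(itertools.islice(prime_generator(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]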
| 652 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    """simple docstring"""

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
        '''simple docstring'''
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ):
        '''simple docstring'''
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp( self ):
        '''simple docstring'''
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        '''simple docstring'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
        self.assertTrue(hasattr(image_processing , 'apply_ocr' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors='pt' )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ) , )
    def test_layoutlmv3_integration_test( self ):
        '''simple docstring'''
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
        image = Image.open(ds[0]['file'] ).convert('RGB' )
        encoding = image_processing(image , return_tensors='pt' )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_snake_case : Optional[int] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_snake_case : Optional[int] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , _A )
self.assertListEqual(encoding.boxes , _A )
# with apply_OCR = False
_snake_case : str = LayoutLMvaImageProcessor(apply_ocr=_A )
_snake_case : int = image_processing(_A , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = '''bert-base-cased'''
PEGASUS_XSUM = '''google/pegasus-xsum'''
ARTICLES = [''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
SUMMARIES = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
T5_TINY = '''patrickvonplaten/t5-tiny-random'''
BART_TINY = '''sshleifer/bart-tiny-random'''
MBART_TINY = '''sshleifer/tiny-mbart'''
MARIAN_TINY = '''sshleifer/tiny-marian-en-de'''
def _dump_articles(path , articles ):
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )


def make_test_data_dir(tmp_dir ):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
class lowercase ( a_ ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
_snake_case : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['input_ids'].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['input_ids'].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f'too many tokens in {len(failures)} batches')
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)
        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader, k='input_ids'):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl, k='labels')) < sum(count_pad_tokens(naive_dl, k='labels'))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA', False):
            data_dir = 'examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('train.len').exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = 'examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path='train', max_source_length=max_len, max_target_length=max_len, n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def __UpperCAmelCase ( self : Dict , tok_name : Optional[int] ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8, src_lang='EN', tgt_lang='FR',
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'src_lang' in kwargs and 'tgt_lang' in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path='train', max_source_length=4, max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert 'add_prefix_space' not in kwargs if tok_name != BART_TINY else 'add_prefix_space' in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 652 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.attention_layers)` == `config.num_layers` '
                f'but is `len(config.attention_layers) = {len(self.attention_layers)}`, '
                f'`config.num_layers = {self.num_layers}`. '
                '`config.attention_layers` is prepared using `config.attention_types`. '
                'Please verify the value of `config.attention_types` argument.')
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
    def expand_attention_types_params(attention_types):
        '''simple docstring'''
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
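# For the default `attention_types=[[["global", "local"], 12]]` above, this
# expands to a 24-entry list alternating "global" and "local", which matches the
# default `num_layers=24` and so passes the length check in __init__.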
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch
    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    import torch
    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
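# Illustration: for seq_length=256 and window_size=256, the candidates are
# 1..255, the largest one dividing 256 is 128, so this returns (128, 2) --
# a block length of 128 and 2 blocks.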
class GPTNeoOnnxConfig(OnnxConfigWithPast):
"""simple docstring"""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
@property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
| 712 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """
    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # take items in decreasing value-per-weight order
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
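# The greedy choice above is optimal for the *fractional* problem (unlike 0/1
# knapsack, where it can fail): only the last item taken may be fractional, and
# sorting dominates the cost, so the routine runs in O(n log n).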
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
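# Sanity check of the two calls below: p = 7 gives m = 2**7 - 1 = 127, which is
# prime, so the test returns True; p = 11 gives m = 2047 = 23 * 89, so it
# returns False. Only prime exponents p are meaningful inputs to this test.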
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_canine'] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
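# With the lazy module installed above, heavy submodules are only imported on
# first attribute access: e.g. `from transformers.models.canine import CanineModel`
# resolves through _LazyModule instead of eagerly importing the torch code.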
| 652 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
    print('Our RoBERTa config:', config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias
    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias
        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f'max_absolute_diff = {max_absolute_diff}')  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print('Do both models output the same tensors?', '🔥' if success else '💩')
    if not success:
        raise Exception('Something went wRoNg')
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
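    # Hedged usage sketch (paths below are placeholders, not from the source):
    #   python convert_xlm_roberta_xl_checkpoint_to_pytorch.py \
    #       --roberta_checkpoint_path /path/to/fairseq_checkpoint_dir \
    #       --pytorch_dump_folder_path ./xlm-roberta-xl-converted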
| 714 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
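# This is Project Euler problem 7: solution() with the default nth=10001
# returns 104743, the 10001st prime.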
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model(self):
'''simple docstring'''
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base')
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 715 |
import torch
from transformers import AutoModel
class lowercase ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Dict="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
super(lowerCamelCase_ , self ).__init__()
_snake_case : Optional[Any] = AutoModel.from_pretrained(lowerCamelCase_ , return_dict=lowerCamelCase_ )
_snake_case : str = torch.nn.CosineSimilarity(3 , 1e-08 )
_snake_case : str = torch.nn.Softmax(dim=1 )
def __UpperCAmelCase ( self : int , **lowerCamelCase_ : List[str] ):
'''simple docstring'''
return self.bert(**lowerCamelCase_ ).last_hidden_state
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
return token_embeddings.sum(2 , keepdim=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=1 ):
'''simple docstring'''
return self.softmax(T * self.cos(lowerCamelCase_ , lowerCamelCase_ ) )
    def forward(self, W_query, W_supports):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports['sizes']
        del W_supports['start_token_id']
        del W_supports['end_token_id']
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end
        return p_starts, p_ends
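# Hedged usage sketch: W_query and W_supports are batches produced by a matching
# tokenizer (dicts with 'input_ids', 'attention_mask', plus the 'sizes' and
# start/end token id entries consumed above). The returned (p_starts, p_ends)
# hold, per query token, probabilities of being an entity start/end, scored
# against the support examples.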
| 652 | 0 |
from numpy import exp, pi, sqrt
def A__( __lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
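# Quick check: at the mean of a standard normal, gaussian(0) == 1 / sqrt(2 * pi),
# approximately 0.3989422804014327.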
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """simple docstring"""
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
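# Shape note (hedged): encoder_input_tokens is (batch, seq_len) token ids and
# encoder_inputs_mask is (batch, seq_len); the first returned tensor is
# (batch, seq_len, d_model), and the mask is passed through unchanged.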
| 652 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'speech_to_text'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                'Configuration for convolutional module is incorrect. '
                'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
                f'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, '
                f'`config.num_conv_layers = {self.num_conv_layers}`.')
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 717 |
def excel_title_to_column(column_title: str) -> int:
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
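# Worked example: "AB" -> (ord('B') - 64) * 26**0 + (ord('A') - 64) * 26**1
#                      = 2 + 26 = 28, and "ZY" -> 25 + 26 * 26 = 701.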
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652 | 0 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ : Optional[Any] = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        '''simple docstring'''
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError('Sacrebleu requires the same number of references for each prediction')
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ : Optional[int] = object()
# For specifying empty leaf dict `{}`
lowercase_ : List[Any] = object()
def _match(qs, ks):
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '$') for x in qs))
    # scan every window of ks for a full match against the patterns
    for i in range(len(ks) - len(qts) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val
    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
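# Reading the specs above: P('mp', None) shards a parameter's first axis across
# the 'mp' mesh axis and replicates the second; P(None, 'mp') does the reverse;
# a bare None means the parameter is fully replicated.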
def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor step (explicit Euler)
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector step (trapezoidal average of the two slopes)
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
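# Worked example: for y' = y with y(0) = 1 and step_size = 0.1, one step gives
# y1 = 1 + 0.05 * (1 + 1.1) = 1.105, close to exp(0.1) ~ 1.10517.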
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
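# Quick check: entropy(torch.tensor([0.5, 0.5])) == log(2) ~ 0.6931; with
# unlogit=True the input is first squared element-wise before the
# -sum(p * log p) reduction.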
def print_ad_tensor(tensor):
    """Print a 2D tensor."""
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_ad_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )
    logger.info('Final head mask')
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir',
        default=None,
        type=str,
        required=True,
        help='The input data dir. Should contain the .tsv files (or other data files) for the task.',
    )
    parser.add_argument(
        '--model_name_or_path',
        default=None,
        type=str,
        required=True,
        help='Path to pretrained model or model identifier from huggingface.co/models',
    )
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    # Other parameters
    parser.add_argument(
        '--config_name',
        default='',
        type=str,
        help='Pretrained config name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--tokenizer_name',
        default='',
        type=str,
        help='Pretrained tokenizer name or path if not the same as model_name_or_path',
    )
    parser.add_argument(
        '--cache_dir',
        default=None,
        type=str,
        help='Where do you want to store the pre-trained models downloaded from s3',
    )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.'
    )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
    '--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
    '--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
    args = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device('cuda' , args.local_rank )
        args.n_gpu = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
# Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , 'run_args.bin' ) )
    logger.info('Training/evaluation parameters %s' , args )
# Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.int64 ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
| 652 | 0 |
def move_tower( height , from_pole , to_pole , with_pole ):
    # Recursively move `height` disks from `from_pole` to `to_pole`, using `with_pole` as scratch space.
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ):
    print('moving disk from' , from_pole , 'to' , to_pole )
def main():
    height = int(input('Height of hanoi: ' ).strip() )
    move_tower(height , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
| 721 |
def A__( __lowerCAmelCase ):
    if not isinstance(__lowerCAmelCase , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_string = str(abs(__lowerCAmelCase ) )
        # One copy of the digit list per position; each copy then drops a different digit.
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 652 | 0 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A__( __lowerCAmelCase ) -> None:
    # Freeze a module: disable gradient tracking on every parameter.
    for param in __lowerCAmelCase.parameters():
        param.requires_grad = False
def A__( ) -> str:
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = 'mps'
    if device == "mps":
        print(
            'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
            ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
            ' with generations.' )
    return device
def A__( __lowerCAmelCase ) -> None:
    fig = plt.imshow(__lowerCAmelCase )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def A__( ) -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime('%H:%M:%S' )
    return timestamp
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase_ : Tuple = logging.getLogger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase_ : Dict=-1 ):
'''simple docstring'''
        self.label_idx = lowerCamelCase_
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
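        # Parse a CoNLL-style file: one token per line, with blank lines or -DOCSTART- markers separating sentences.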
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : Union[str, Any] = mode.value
_snake_case : Optional[int] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Dict = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
_snake_case : List[Any] = []
_snake_case : int = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
_snake_case : Optional[Any] = []
_snake_case : Union[str, Any] = []
else:
_snake_case : Tuple = line.split(' ' )
words.append(splits[0] )
if len(lowerCamelCase_ ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
return examples
def __UpperCAmelCase ( self : Union[str, Any] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
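        # Copy the test file through, emitting each token together with the next queued prediction for its sentence.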
_snake_case : str = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(lowerCamelCase_ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_snake_case : List[str] = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(lowerCamelCase_ )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : Optional[int] = f.read().splitlines()
if "O" not in labels:
_snake_case : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
from datetime import datetime as dt
import os
from github import Github
lowercase_ : List[Any] = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
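# Close inactive issues and leave a stale warning on quiet ones, skipping anything that carries an exempt label.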
def A__( ):
_snake_case : Union[str, Any] = Github(os.environ['GITHUB_TOKEN'] )
_snake_case : Tuple = g.get_repo('huggingface/transformers' )
_snake_case : Optional[Any] = repo.get_issues(state='open' )
for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda comment : comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
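# Tests for the safe Stable Diffusion pipeline, which layers safe latent diffusion (SLD) guidance on the standard pipeline.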
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
                    self.pixel_values = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |
def A__( __lowerCAmelCase ):
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(__lowerCAmelCase )] )
def A__( __lowerCAmelCase ):
    if (len(__lowerCAmelCase ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(__lowerCAmelCase ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(__lowerCAmelCase[i] + __lowerCAmelCase[i + 1] , 16 ) for i in range(0 , len(__lowerCAmelCase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702 |
import functools
def A__( days , costs ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError('The parameter days should be a list of integers' )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError('The parameter costs should be a list of three integers' )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError('All days elements should be greater than 0' )
    if max(days ) >= 3_66:
        raise ValueError('All days elements should be less than 366' )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index ) -> int:
        if index > 3_65:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        # Buy a 1-day, 7-day or 30-day pass covering this travel day and keep the cheapest total.
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase :
"""simple docstring"""
def __init__( self : int , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any]=13 , lowerCamelCase_ : Any=64 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : List[str]=5 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : List[str]="gelu" , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : int=[1, 16, 4, 4] , lowerCamelCase_ : int=None , ):
'''simple docstring'''
_snake_case : Optional[int] = parent
_snake_case : Optional[Any] = batch_size
_snake_case : Optional[Any] = image_size
_snake_case : List[str] = patch_size
_snake_case : Any = num_channels
_snake_case : List[str] = is_training
_snake_case : str = use_labels
_snake_case : Optional[Any] = hidden_size
_snake_case : Union[str, Any] = num_hidden_layers
_snake_case : List[Any] = num_attention_heads
_snake_case : List[str] = intermediate_size
_snake_case : str = hidden_act
_snake_case : str = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : List[Any] = type_sequence_label_size
_snake_case : Tuple = initializer_range
_snake_case : Union[str, Any] = scope
_snake_case : str = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_snake_case : List[str] = (self.image_size // 32) ** 2
_snake_case : List[str] = num_patches + 1
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Tuple = None
if self.use_labels:
_snake_case : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case : Optional[int] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : int = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=_lowercase , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : List[str] = ViTHybridModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : int = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.type_sequence_label_size
_snake_case : str = ViTHybridForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_snake_case : Any = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Optional[int] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_UpperCamelCase : Tuple = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Dict = False
_UpperCamelCase : int = False
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = ViTHybridModelTester(self )
_snake_case : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case , _snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : List[Any] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Any = model_class(_lowercase )
_snake_case : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : List[str] = [*signature.parameters.keys()]
_snake_case : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , _lowercase )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case : Optional[Any] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
_snake_case : int = model_class(config=_lowercase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_snake_case : Tuple = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case : Any = ViTHybridModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def A__( ):
_snake_case : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_lowercase )
_snake_case : Dict = self.default_image_processor
_snake_case : int = prepare_img()
_snake_case : Any = image_processor(images=_lowercase , return_tensors='pt' ).to(_lowercase )
# forward pass
with torch.no_grad():
_snake_case : Union[str, Any] = model(**_lowercase )
# verify the logits
_snake_case : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
_snake_case : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
@require_accelerate
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
_snake_case : Any = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
_snake_case : Tuple = prepare_img()
_snake_case : Dict = image_processor(images=_lowercase , return_tensors='pt' )
_snake_case : List[str] = model(**_lowercase )
_snake_case : List[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
_snake_case : Any = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowerCamelCase_ , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
def nor_gate( input_1 , input_2 ):
    return int(input_1 == input_2 == 0 )
def main():
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 704 |
from math import factorial
def binomial_distribution( successes , trials , prob ):
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in the range 0 - 1' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
| 652 | 0 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
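# Tests for the Flax ViT backbone and its image-classification head.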
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any]=13 , lowerCamelCase_ : Optional[Any]=30 , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : str=32 , lowerCamelCase_ : List[Any]=5 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=0.02 , ):
'''simple docstring'''
_snake_case : Optional[Any] = parent
_snake_case : str = batch_size
_snake_case : Tuple = image_size
_snake_case : Optional[Any] = patch_size
_snake_case : Optional[int] = num_channels
_snake_case : Dict = is_training
_snake_case : List[str] = use_labels
_snake_case : Optional[int] = hidden_size
_snake_case : Any = num_hidden_layers
_snake_case : Optional[int] = num_attention_heads
_snake_case : Optional[Any] = intermediate_size
_snake_case : Dict = hidden_act
_snake_case : List[Any] = hidden_dropout_prob
_snake_case : Any = attention_probs_dropout_prob
_snake_case : Any = type_sequence_label_size
_snake_case : List[Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case : Union[str, Any] = (image_size // patch_size) ** 2
_snake_case : int = num_patches + 1
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case : Dict = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
return config, pixel_values
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : Any = FlaxViTModel(config=A_ )
_snake_case : Dict = model(A_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_snake_case : Optional[Any] = (self.image_size, self.image_size)
_snake_case : str = (self.patch_size, self.patch_size)
_snake_case : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : List[str] = self.type_sequence_label_size
_snake_case : Optional[Any] = FlaxViTForImageClassification(config=A_ )
_snake_case : Optional[Any] = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case : Tuple = 1
_snake_case : int = FlaxViTForImageClassification(A_ )
_snake_case : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case : List[str] = model(A_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_flax
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : int = FlaxViTModelTester(self )
_snake_case : Optional[int] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case : Tuple = model_class(A_ )
_snake_case : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case : Optional[int] = [*signature.parameters.keys()]
_snake_case : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_snake_case : Union[str, Any] = self._prepare_for_class(A_ , A_ )
_snake_case : Union[str, Any] = model_class(A_ )
@jax.jit
def model_jitted(lowerCamelCase_ : str , **lowerCamelCase_ : int ):
return model(pixel_values=A_ , **A_ )
with self.subTest('JIT Enabled' ):
_snake_case : Optional[Any] = model_jitted(**A_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_snake_case : Union[str, Any] = model_jitted(**A_ ).to_tuple()
self.assertEqual(len(A_ ) , len(A_ ) )
for jitted_output, output in zip(A_ , A_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_snake_case : Optional[int] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
_snake_case : Any = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(A_ )
| 705 |
lowercase_ : Tuple = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
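# Placeholder class names substituted into documentation code samples when the notebooks are generated.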
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase_ : int = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class lowercase ( __lowercase ):
"""simple docstring"""
def __init__( self : str , *lowerCamelCase_ : Tuple , **lowerCamelCase_ : Dict ):
'''simple docstring'''
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def __UpperCAmelCase ( self : List[Any] , top_k : Any=None ):
        '''simple docstring'''
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params
def __call__( self : Tuple , lowerCamelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCamelCase_ : int ):
'''simple docstring'''
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
    def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Optional[int] ):
        '''simple docstring'''
        image = load_image(lowerCamelCase_ )
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def __UpperCAmelCase ( self : int , lowerCamelCase_ : Any ):
        '''simple docstring'''
        model_outputs = self.model(**lowerCamelCase_ )
        return model_outputs
    def __UpperCAmelCase ( self : List[Any] , model_outputs : str , top_k : List[Any]=5 ):
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1 )[0]
            topk = tf.math.top_k(probs , k=top_k )
            scores , ids = topk.values.numpy() , topk.indices.numpy()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 706 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
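# Optional dependency checks: RoCBert registers no extra tokenizers-backed objects, so both branches below stay empty.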
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase ( a_ ):
"""simple docstring"""
    def __init__( self : List[str] , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
    def __UpperCAmelCase ( self : Optional[Any] ):
        '''simple docstring'''
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 707 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ : Optional[int] = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Any = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652 | 0 |
import requests

APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'


# NOTE: `locals()` is sent as the query string, so the parameter names below must
# match the OpenWeatherMap API parameters (`q`, `appid`, `lat`, `lon`).
def current_weather(q: str = 'Chicago', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = 'Kolkata, India', appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
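# Quick sanity check (added for illustration; runs only once APPID is set, and it
# assumes the standard OpenWeatherMap response shape with the temperature under
# resp["main"]["temp"]):
if APPID:
    resp = current_weather('Chicago')
    print(resp['main']['temp'])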
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
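# Note (added): these tests hit live Hub repositories, so they require network
# access; the module-level `pytestmark = pytest.mark.integration` above is what
# marks the whole file as an integration suite.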
| 652 | 0 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)

    return flax_key_tuple, flax_tensor
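# Example (added): a 3D "kernel" (an expert layer) is renamed to "weight" and its
# last two axes are swapped for PyTorch:
#   rename_base_flax_keys(("mlp", "wi", "kernel"), torch.zeros(8, 3, 5))
#   -> (("mlp", "wi", "weight"), <tensor of shape (8, 5, 3)>)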
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split('metadata')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/'))]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/'))]
    else:
        split_layer = layer.split('/')
        curr_real_layer_name = '/'.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/', '.')] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info, sep='/')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('/')), raw_weights)
        key = '/'.join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin')
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '.bin', f'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin'
        )
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)

    return metadata, index
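# Illustration (added) of the shard-file naming used above, assuming the default
# WEIGHTS_NAME of "pytorch_model.bin" and three shards:
#   "pytorch_model.bin".replace(".bin", f"-{1:05d}-of-{3:05d}.bin")
#   -> "pytorch_model-00001-of-00003.bin"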
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
lowercase_ : Union[str, Any] = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto'
    )
    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 709 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowercase_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase_ : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
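# Example invocation (added; file paths are hypothetical, for illustration only):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin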
| 652 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    # Return the sum of three fractions as a (numerator, denominator) pair in lowest form.
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
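# Quick check of add_three (added): 1/3 + 1/3 + 1/3 reduces to 1/1.
assert add_three(1, 3, 1, 3, 1, 3) == (1, 1)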
if __name__ == "__main__":
print(F'''{solution() = }''')
| 710 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
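# Small sanity checks (added): the 1st prime is 2 and the 6th is 13.
assert solution(1) == 2
assert solution(6) == 13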
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'


def _dump_articles(path: Path, articles: list):
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f'{split}.source'), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f'{split}.target'), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Optional[int] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Any = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Dict = 4
_snake_case : Any = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_snake_case , _snake_case : Optional[Any] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , src_lang=lowerCamelCase_ , tgt_lang=lowerCamelCase_ , )
_snake_case : List[str] = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_snake_case : List[Any] = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : List[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_snake_case : Dict = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in ARTICLES )
_snake_case : Union[str, Any] = max(len(tokenizer.encode(lowerCamelCase_ ) ) for a in SUMMARIES )
_snake_case : Union[str, Any] = 4
_snake_case : Optional[int] = LegacySeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=20 , max_target_length=lowerCamelCase_ , )
_snake_case : Dict = DataLoader(lowerCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines()
_snake_case : Optional[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(lowerCamelCase_ , lowerCamelCase_ , 1_28 , lowerCamelCase_ )
_snake_case : Tuple = {x.name for x in tmp_dir.iterdir()}
_snake_case : Dict = {x.name for x in save_dir.iterdir()}
_snake_case : str = save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(lowerCamelCase_ ) < len(lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 1
assert len(packed_examples[0] ) == sum(len(lowerCamelCase_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_snake_case , _snake_case , _snake_case : int = self._get_dataset(max_len=64 )
_snake_case : List[str] = 64
_snake_case : str = ds.make_dynamic_sampler(lowerCamelCase_ , required_batch_size_multiple=lowerCamelCase_ )
_snake_case : Optional[Any] = [len(lowerCamelCase_ ) for x in batch_sampler]
assert len(set(lowerCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(lowerCamelCase_ ) == len(lowerCamelCase_ ) # no dropped or added examples
_snake_case : Union[str, Any] = DataLoader(lowerCamelCase_ , batch_sampler=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : List[Any] = []
_snake_case : List[Any] = []
for batch in data_loader:
_snake_case : Any = batch['input_ids'].shape
_snake_case : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_snake_case : int = np.product(batch['input_ids'].shape )
num_src_per_batch.append(lowerCamelCase_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(lowerCamelCase_ )
assert num_src_per_batch[0] == max(lowerCamelCase_ )
if failures:
raise AssertionError(f'''too many tokens in {len(lowerCamelCase_ )} batches''' )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : str = self._get_dataset(max_len=5_12 )
_snake_case : Optional[Any] = 2
_snake_case : Dict = ds.make_sortish_sampler(lowerCamelCase_ , shuffle=lowerCamelCase_ )
_snake_case : int = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 )
_snake_case : str = DataLoader(lowerCamelCase_ , batch_size=lowerCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCamelCase_ )
_snake_case : Tuple = tokenizer.pad_token_id
def count_pad_tokens(lowerCamelCase_ : List[str] , lowerCamelCase_ : Any="input_ids" ):
return [batch[k].eq(lowerCamelCase_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) ) < sum(count_pad_tokens(lowerCamelCase_ , k='labels' ) )
assert sum(count_pad_tokens(lowerCamelCase_ ) ) < sum(count_pad_tokens(lowerCamelCase_ ) )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Tuple=10_00 , lowerCamelCase_ : Tuple=1_28 ):
'''simple docstring'''
if os.getenv('USE_REAL_DATA' , lowerCamelCase_ ):
_snake_case : Dict = 'examples/seq2seq/wmt_en_ro'
_snake_case : List[Any] = max_len * 2 * 64
if not Path(lowerCamelCase_ ).joinpath('train.len' ).exists():
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
else:
_snake_case : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro'
_snake_case : List[Any] = max_len * 4
save_len_file(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(lowerCamelCase_ )
_snake_case : str = SeqaSeqDataset(
lowerCamelCase_ , data_dir=lowerCamelCase_ , type_path='train' , max_source_length=lowerCamelCase_ , max_target_length=lowerCamelCase_ , n_obs=lowerCamelCase_ , )
return ds, max_tokens, tokenizer
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case : Any = self._get_dataset()
_snake_case : List[str] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=lowerCamelCase_ ) )
_snake_case : Optional[Any] = set(DistributedSortishSampler(lowerCamelCase_ , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=lowerCamelCase_ ) )
assert idsa.intersection(lowerCamelCase_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase_ , use_fast=lowerCamelCase_ )
if tok_name == MBART_TINY:
_snake_case : int = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_snake_case : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_snake_case : Tuple = SeqaSeqDataset(
lowerCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_snake_case : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(lowerCamelCase_ ) == 1 if tok_name == BART_TINY else len(lowerCamelCase_ ) == 0
| 652 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
lowercase_ : int = None
lowercase_ : List[Any] = logging.get_logger(__name__)
lowercase_ : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowercase_ : Tuple = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
lowercase_ : Dict = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class lowercase ( __UpperCAmelCase ):
"""simple docstring"""
_UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : int = ["input_ids", "attention_mask"]
_UpperCamelCase : List[str] = TaTokenizer
_UpperCamelCase : str = []
def __init__( self : str , lowerCamelCase_ : Any=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str="</s>" , lowerCamelCase_ : List[str]="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : Optional[int]=1_00 , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Any , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
_snake_case : Union[str, Any] = [f'''<extra_id_{i}>''' for i in range(lowerCAmelCase_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
_snake_case : List[Any] = len(set(filter(lambda lowerCamelCase_ : bool('extra_id_' in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
' tokens' )
super().__init__(
lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , )
_snake_case : str = vocab_file
_snake_case : int = False if not self.vocab_file else True
_snake_case : int = extra_ids
@staticmethod
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_snake_case : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'This tokenizer was incorrectly instantiated with a model max length of'
f''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
f''' {pretrained_model_name_or_path} automatically truncating your input to'''
f''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
f''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
' instantiate this tokenizer with `model_max_length` set to your preferred value.' , lowerCAmelCase_ , )
return max_model_length
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case : Tuple = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ):
copyfile(self.vocab_file , lowerCAmelCase_ )
logger.info(f'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def __UpperCAmelCase ( self : int , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_snake_case : int = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Dict = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r'<extra_id_\d+>', x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
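# Minimal usage sketch (added; assumes Hub access and the public "t5-small" checkpoint):
#   from transformers import T5TokenizerFast
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tok("Translate: hello").input_ids
#   assert ids[-1] == tok.eos_token_id  # build_inputs_with_special_tokens appends </s>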
| 712 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
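# Worked example (added; values are illustrative): the value/weight ratios are
# [6.0, 5.0, 4.0], so items 0 and 1 fit whole and 20/30 of item 2 is taken.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])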
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 | 0 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( _UpperCamelCase ):
"""simple docstring"""
_UpperCamelCase : str = ["image_processor", "tokenizer"]
_UpperCamelCase : int = "Pix2StructImageProcessor"
_UpperCamelCase : int = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : List[Any] = False
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Any , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : List[Any] = True , lowerCamelCase_ : Tuple = False , lowerCamelCase_ : Optional[Any] = None , lowerCamelCase_ : int = None , lowerCamelCase_ : Optional[Any] = 20_48 , lowerCamelCase_ : Any = 0 , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Any = False , lowerCamelCase_ : Any = False , lowerCamelCase_ : Optional[Any] = False , lowerCamelCase_ : str = False , lowerCamelCase_ : int = False , lowerCamelCase_ : str = True , lowerCamelCase_ : Optional[Any] = None , **lowerCamelCase_ : Union[str, Any] , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None and not self.image_processor.is_vqa:
_snake_case : int = self.tokenizer
_snake_case : List[str] = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_snake_case : Optional[Any] = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , max_patches=_UpperCAmelCase , **_UpperCAmelCase )
else:
# add pixel_values and bbox
_snake_case : Dict = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , max_patches=_UpperCAmelCase , header_text=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and not self.image_processor.is_vqa:
_snake_case : Dict = self.tokenizer(
text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
if "attention_mask" in text_encoding:
_snake_case : List[str] = text_encoding.pop('attention_mask' )
if "input_ids" in text_encoding:
_snake_case : Dict = text_encoding.pop('input_ids' )
else:
_snake_case : Optional[Any] = None
if text_encoding is not None:
encoding_image_processor.update(_UpperCAmelCase )
return encoding_image_processor
def __UpperCAmelCase ( self : int , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : List[Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def __UpperCAmelCase ( self : Dict , *lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Any ):
'''simple docstring'''
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer.model_input_names
_snake_case : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
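# Minimal usage sketch (added; assumes Hub access and the public
# "google/pix2struct-textcaps-base" checkpoint; `image` is any PIL image):
#   from transformers import Pix2StructProcessor
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A picture of", return_tensors="pt")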
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''],
'''tokenization_canine''': ['''CanineTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CanineForMultipleChoice''',
'''CanineForQuestionAnswering''',
'''CanineForSequenceClassification''',
'''CanineForTokenClassification''',
'''CanineLayer''',
'''CanineModel''',
'''CaninePreTrainedModel''',
'''load_tf_weights_in_canine''',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 652 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
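# Same instance as the greedy variant above would solve (added; illustrative
# values): bisect lands on the third item, which is taken fractionally.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0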
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
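# Sanity check (added): the sixth prime is 13.
assert solution(6) == 13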
if __name__ == "__main__":
print(F'''{solution() = }''')
| 652 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print('moving disk from', fp, 'to', tp)


def main():
    height = int(input('Height of hanoi: ').strip())
    move_tower(height, 'A', 'B', 'C')
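# Trace (added) for a height-2 tower; n disks always take 2**n - 1 moves:
#   move_tower(2, 'A', 'B', 'C') prints
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B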
if __name__ == "__main__":
main()
| 715 |
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path='sayef/fsner-bert-base-uncased'):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()

        del W_supports['sizes']
        del W_supports['start_token_id']
        del W_supports['end_token_id']

        # compute embeddings of the query and the supports
        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
| 652 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ : List[str] = logging.get_logger(__name__)
UpperCamelCase_ : List[Any] = '''▁'''
UpperCamelCase_ : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
UpperCamelCase_ : Union[str, Any] = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
UpperCamelCase_ : Union[str, Any] = {'''vinai/bartpho-syllable''': 1024}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Any = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]="<s>" , lowerCamelCase_ : int="</s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Optional[int]="<s>" , lowerCamelCase_ : int="<unk>" , lowerCamelCase_ : Union[str, Any]="<pad>" , lowerCamelCase_ : Optional[int]="<mask>" , lowerCamelCase_ : Union[str, Any] = None , **lowerCamelCase_ : List[str] , ):
'''simple docstring'''
_snake_case : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
_snake_case : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , )
_snake_case : Optional[int] = vocab_file
_snake_case : Optional[Any] = monolingual_vocab_file
_snake_case : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_snake_case : Dict = {}
_snake_case : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
_snake_case : Optional[Any] = cnt
cnt += 1
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
_snake_case : Union[str, Any] = line.strip().split()[0]
_snake_case : Dict = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
_snake_case : Union[str, Any] = len(self.fairseq_tokens_to_ids )
_snake_case : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ):
'''simple docstring'''
_snake_case : List[str] = self.__dict__.copy()
_snake_case : Optional[Any] = None
_snake_case : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
_snake_case : str = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case : int = {}
_snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case : Tuple = [self.cls_token_id]
_snake_case : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : Tuple , lowerCamelCase_ : int = None , lowerCamelCase_ : int = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : str = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCAmelCase ( self : str , lowerCamelCase_ : int ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Dict ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : Any ):
'''simple docstring'''
_snake_case : List[str] = ''.join(lowerCamelCase_ ).replace(lowerCamelCase_ , ' ' ).strip()
return out_string
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case : List[Any] = os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : str = os.path.join(
lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_ , 'wb' ) as fi:
_snake_case : Dict = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , lowerCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase_ )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 716 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowercase ( a_ , a_ , a_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : float , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case : Optional[int] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : List[str] = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Union[str, Any] = False
_snake_case : Tuple = nn.Dropout(p=lowerCamelCase_ )
_snake_case : Union[str, Any] = TaConfig(
vocab_size=lowerCamelCase_ , d_model=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_kv=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , feed_forward_proj=lowerCamelCase_ , is_decoder=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , )
_snake_case : Union[str, Any] = nn.ModuleList()
for lyr_num in range(lowerCamelCase_ ):
_snake_case : Any = TaBlock(lowerCamelCase_ )
self.encoders.append(lowerCamelCase_ )
_snake_case : Tuple = TaLayerNorm(lowerCamelCase_ )
_snake_case : List[str] = nn.Dropout(p=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Any = self.token_embedder(lowerCamelCase_ )
_snake_case : List[Any] = encoder_input_tokens.shape[1]
_snake_case : Any = torch.arange(lowerCamelCase_ , device=encoder_input_tokens.device )
x += self.position_encoding(lowerCamelCase_ )
_snake_case : Tuple = self.dropout_pre(lowerCamelCase_ )
# inverted the attention mask
_snake_case : Dict = encoder_input_tokens.size()
_snake_case : Optional[int] = self.get_extended_attention_mask(lowerCamelCase_ , lowerCamelCase_ )
for lyr in self.encoders:
_snake_case : str = lyr(lowerCamelCase_ , lowerCamelCase_ )[0]
_snake_case : Any = self.layer_norm(lowerCamelCase_ )
return self.dropout_post(lowerCamelCase_ ), encoder_inputs_mask
| 652 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ : Union[str, Any] = None
lowercase_ : Dict = logging.get_logger(__name__)
lowercase_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ : Dict = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1024,
"""moussaKam/barthez""": 1024,
"""moussaKam/barthez-orangesum-title""": 1024,
}
lowercase_ : Optional[Any] = """▁"""
class lowercase ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
        # the mask token behaves like a normal word that also strips the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
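# Hypothetical usage sketch (not from the source): downloading the published checkpoint
# requires network access; upstream this class is exported as BarthezTokenizerFast.
if __name__ == "__main__":
    tok = lowercase.from_pretrained('moussaKam/barthez' )
    print(tok.tokenize('Transformers est génial !' ) )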
| 717 |
def A__( column_title : str ) -> int:
    """Return the column number for an Excel-style column title.

    >>> A__('A')
    1
    >>> A__('AB')
    28
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 652 | 0 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )


class lowercase ( metaclass=DummyObject ):
    _backends = ['flax']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['flax'] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['flax'] )
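# Illustrative sketch (not from the source): every placeholder class above raises an
# ImportError through `requires_backends` as soon as it is used without flax installed.
if __name__ == "__main__":
    try:
        lowercase()  # whichever placeholder was bound last to the name `lowercase`
    except ImportError as err:
        print(err )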
| 718 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse('1.4.12' ):
raise ImportWarning(
'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'
'You can install it with `pip install "sacrebleu>=1.4.12"`.' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='http://www.cs.umd.edu/~snover/tercom/' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#ter'] , reference_urls=[
'https://github.com/jhclark/tercom',
] , )
    def _compute( self , predictions , references , normalized : bool = False , ignore_punct : bool = False , support_zh_ja_chars : bool = False , case_sensitive : bool = False , ):
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('Sacrebleu requires the same number of references for each prediction' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 652 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'


def tpu_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f'''accelerate=={args.accelerate_version}'''

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )

    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'''pip install {args.accelerate_version}''']
    new_cmd += args.command
    args.command = '; '.join(new_cmd )

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'''Running {' '.join(cmd )}''' )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )


if __name__ == "__main__":
    main()
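# Hypothetical invocation sketch (not from the source); the flags match the parser above:
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --debug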
| 719 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match( qs , ks ):
    """Return True if the regexes in qs match any contiguous window of strings in ks."""
    qts = tuple((re.compile(x + '$' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules( rules ):
    def replace( key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions( in_dict ):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
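# Minimal usage sketch (not from the source): a toy parameter tree whose single key matches
# the embedding rule above; the array shape is an arbitrary assumption.
if __name__ == "__main__":
    import numpy as np

    params = {"transformer": {"wte": {"embedding": np.zeros((8, 4) )}}}
    print(set_partitions(params ) )  # maps the key to PartitionSpec('mp', None)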
| 652 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class lowercase ( PretrainedConfig ):
    """Configuration class that composes an encoder config and a decoder config."""

    model_type = 'encoder-decoder'
    is_composition = True

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config : PretrainedConfig , decoder_config : PretrainedConfig , **kwargs ):
        """Instantiate a composed config from an encoder config and a decoder config."""
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
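# Minimal usage sketch (not from the source): upstream this class is EncoderDecoderConfig;
# the two tiny BERT configs below are illustrative assumptions.
if __name__ == "__main__":
    from transformers import BertConfig, EncoderDecoderConfig

    enc = BertConfig(hidden_size=64 , num_hidden_layers=2 , num_attention_heads=2 )
    dec = BertConfig(hidden_size=64 , num_hidden_layers=2 , num_attention_heads=2 )
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc , dec )
    print(cfg.decoder.is_decoder , cfg.decoder.add_cross_attention )  # True True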
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model( model , dirpath ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , 'config.json' ) ) and os.path.isfile(
            os.path.join(dirpath , 'config.json' ) ):
            os.remove(os.path.join(dirpath , 'config.json' ) )
        if os.path.exists(os.path.join(dirpath , 'pytorch_model.bin' ) ) and os.path.isfile(
            os.path.join(dirpath , 'pytorch_model.bin' ) ):
            os.remove(os.path.join(dirpath , 'pytorch_model.bin' ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy( p , unlogit=False ):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor( tensor ):
    """Log a 2D tensor (layers x heads) row by row."""
    logger.info('lv, h >\t' + '\t'.join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + '\t'.join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=False ):
_snake_case , _snake_case : List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case : Optional[int] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
_snake_case : Union[str, Any] = torch.zeros(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
if head_mask is None:
_snake_case : List[str] = torch.ones(__lowerCAmelCase , __lowerCAmelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCAmelCase )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case : List[str] = None
_snake_case : str = 0.0
_snake_case : List[str] = 0.0
for step, inputs in enumerate(tqdm(__lowerCAmelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((_snake_case) , ) : int = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case : List[str] = model(__lowerCAmelCase , labels=__lowerCAmelCase , head_mask=__lowerCAmelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case , _snake_case , _snake_case : Dict = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCAmelCase ):
_snake_case : int = entropy(attn.detach() , __lowerCAmelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCAmelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case : List[str] = 2
_snake_case : Any = torch.pow(torch.pow(__lowerCAmelCase , __lowerCAmelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_snake_case : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCAmelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCAmelCase )
logger.info('Head ranked by importance scores' )
_snake_case : Tuple = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case : Optional[Any] = torch.arange(
head_importance.numel() , device=args.device )
_snake_case : Union[str, Any] = head_ranks.view_as(__lowerCAmelCase )
print_ad_tensor(__lowerCAmelCase )
return attn_entropy, head_importance, total_loss
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case , _snake_case , _snake_case : int = compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase )
_snake_case : List[str] = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCAmelCase , original_score * args.masking_threshold )
_snake_case : Optional[Any] = torch.ones_like(__lowerCAmelCase )
_snake_case : Tuple = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case : Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case : List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case : Optional[Any] = float('Inf' )
_snake_case : Optional[int] = head_importance.view(-1 ).sort()[1]
if len(__lowerCAmelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case : List[Any] = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case : Tuple = new_head_mask.view(-1 )
_snake_case : List[str] = 0.0
_snake_case : int = new_head_mask.view_as(__lowerCAmelCase )
_snake_case : Optional[Any] = new_head_mask.clone().detach()
print_ad_tensor(__lowerCAmelCase )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case : List[str] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : Union[str, Any] = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCAmelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCAmelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[Any] = datetime.now()
_snake_case , _snake_case , _snake_case : List[Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase )
_snake_case : str = 1 / loss
_snake_case : Optional[int] = datetime.now() - before_time
_snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
_snake_case : Optional[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCAmelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Optional[int] = [
v,
]
assert sum(len(__lowerCAmelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCAmelCase )
_snake_case : Optional[Any] = sum(p.numel() for p in model.parameters() )
_snake_case : List[str] = datetime.now()
_snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , compute_entropy=__lowerCAmelCase , compute_importance=__lowerCAmelCase , head_mask=__lowerCAmelCase , actually_pruned=__lowerCAmelCase , )
_snake_case : Dict = 1 / loss
_snake_case : str = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCAmelCase , __lowerCAmelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCAmelCase , __lowerCAmelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCAmelCase , args.output_dir )
def A__( ):
_snake_case : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=__lowerCAmelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=__lowerCAmelCase , type=__lowerCAmelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=__lowerCAmelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=__lowerCAmelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=__lowerCAmelCase , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=__lowerCAmelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=__lowerCAmelCase , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=__lowerCAmelCase , help='Batch size.' )
parser.add_argument('--seed' , type=__lowerCAmelCase , default=42 )
parser.add_argument('--local_rank' , type=__lowerCAmelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=__lowerCAmelCase , default='' , help='Can be used for distant debugging.' )
_snake_case : List[str] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__lowerCAmelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case : Optional[Any] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case : Any = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case : Union[str, Any] = torch.device('cuda' , args.local_rank )
_snake_case : Dict = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case : Any = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case : List[str] = nn.parallel.DistributedDataParallel(
__lowerCAmelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=__lowerCAmelCase )
elif args.n_gpu > 1:
_snake_case : int = nn.DataParallel(__lowerCAmelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=__lowerCAmelCase )
torch.save(__lowerCAmelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , __lowerCAmelCase )
# Prepare dataset
_snake_case : str = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case : Dict = (torch.from_numpy(__lowerCAmelCase ),)
_snake_case : List[Any] = TensorDataset(*__lowerCAmelCase )
_snake_case : List[str] = RandomSampler(__lowerCAmelCase )
_snake_case : List[str] = DataLoader(__lowerCAmelCase , sampler=__lowerCAmelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case : Union[str, Any] = mask_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
prune_heads(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
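# Hypothetical invocation sketch (not from the source); the script name is an assumption and
# --data_dir must point at a token file loadable by np.loadtxt:
#   python prune_gpt2.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned_gpt2 --try_masking --masking_threshold 0.9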
| 652 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''',
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class lowercase ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'perceiver'
    def __init__( self , num_latents=256 , d_latents=1280 , d_model=768 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=262 , max_position_embeddings=2048 , image_size=56 , train_size=[368, 496] , num_frames=16 , audio_samples_per_frame=1920 , samples_per_patch=16 , output_shape=[1, 16, 224, 224] , **kwargs , ):
        super().__init__(**kwargs )

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig ( OnnxConfig ):
    """simple docstring"""

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , num_choices : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
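# Minimal usage sketch (not from the source): upstream this config is PerceiverConfig in
# transformers; the overridden values are illustrative.
if __name__ == "__main__":
    from transformers import PerceiverConfig

    cfg = PerceiverConfig(num_latents=32 , d_latents=128 )
    print(cfg.model_type , cfg.num_latents , cfg.d_latents )  # perceiver 32 128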
| 721 |
def A__( num : int ) -> int:
    """Return the largest value obtainable by deleting exactly one digit of ``num``.

    >>> A__(152)
    52
    >>> A__(649)
    69
    """
    if not isinstance(num , int ):
        raise TypeError('only integers accepted as input' )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 652 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open( *args , **kwargs ):
            pass
@is_pipeline_test
@require_vision
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_snake_case : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case : Dict = image_classifier(lowerCamelCase_ , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase_ ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
_snake_case : Dict = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
] , )
@require_tf
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_snake_case : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case : Dict = image_classifier(lowerCamelCase_ , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
_snake_case : Optional[int] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
[
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
{'score': 0.333, 'label': ANY(lowerCamelCase_ )},
],
] , )
@slow
@require_torch
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case : Any = image_classifier(lowerCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_snake_case : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
_snake_case : Optional[Any] = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_snake_case : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_snake_case : Tuple = image_classifier(lowerCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_snake_case : Tuple = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
| 700 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class NER ( TokenClassificationTask ):
    """simple docstring"""

    def __init__( self , label_idx=-1 ):
        self.label_idx = label_idx

    def read_examples_from_file( self , data_dir , mode : Union[Split, str] ):
        if isinstance(mode , Split ):
            mode = mode.value
        file_path = os.path.join(data_dir , f'''{mode}.txt''' )
        guid_index = 1
        examples = []
        with open(file_path , encoding='utf-8' ) as f:
            words = []
            labels = []
            for line in f:
                if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(' ' )
                    words.append(splits[0] )
                    if len(splits ) > 1:
                        labels.append(splits[self.label_idx].replace('\n' , '' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('O' )
            if words:
                examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=words , labels=labels ) )
        return examples

    def write_predictions_to_file( self , writer : TextIO , test_input_reader : TextIO , preds_list : List ):
        example_id = 0
        for line in test_input_reader:
            if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
                writer.write(line )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(output_line )
            else:
                logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )

    def get_labels( self , path : str ):
        if path:
            with open(path , 'r' ) as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowercase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] ):
'''simple docstring'''
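        # CoNLL-2000 chunking files keep the chunk tag in the second-to-last column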
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
_snake_case : str = f.read().splitlines()
if "O" not in labels:
_snake_case : Union[str, Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowercase ( a_ ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[Split, str] ):
'''simple docstring'''
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_snake_case : str = mode.value
_snake_case : List[str] = os.path.join(lowerCamelCase_ , f'''{mode}.txt''' )
_snake_case : Tuple = 1
_snake_case : List[str] = []
with open(lowerCamelCase_ , encoding='utf-8' ) as f:
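            # parse_incr streams CoNLL-U sentences lazily; we keep each token's surface form and universal POS tag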
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : List[str] = []
_snake_case : str = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCamelCase_ , labels=lowerCamelCase_ ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : TextIO , lowerCamelCase_ : TextIO , lowerCamelCase_ : List ):
'''simple docstring'''
_snake_case : Dict = 0
for sentence in parse_incr(lowerCamelCase_ ):
_snake_case : Optional[int] = preds_list[example_id]
_snake_case : List[Any] = ''
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCamelCase_ )
example_id += 1
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : str ):
'''simple docstring'''
if path:
with open(lowerCamelCase_ , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 652 | 0 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A__( __lowerCAmelCase ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
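# Illustrative usage: the function takes a Unicode code point, not a str, so callers
# pass ord(char), e.g. _is_chinese_char(ord('神')) -> True, _is_chinese_char(ord('A')) -> False.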
def A__( __lowerCAmelCase ):
    # a word may look like '180', '身高', or '神'; return 1 only if every character is Chinese
for char in word:
_snake_case : List[str] = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def A__( __lowerCAmelCase ):
_snake_case : List[str] = set()
for token in tokens:
_snake_case : Optional[Any] = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
_snake_case : Any = list(__lowerCAmelCase )
return word_list
def A__( __lowerCAmelCase , __lowerCAmelCase ):
if not chinese_word_set:
return bert_tokens
_snake_case : int = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
_snake_case : Any = bert_tokens
    _snake_case , _snake_case = 0, len(__lowerCAmelCase )  # two pointers: start, end
while start < end:
_snake_case : str = True
if is_chinese(bert_word[start] ):
_snake_case : str = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
_snake_case : Any = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_snake_case : Tuple = '##' + bert_word[j]
_snake_case : List[Any] = start + i
_snake_case : Any = False
break
if single_word:
start += 1
return bert_word
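# Worked example (inputs assumed): with bert_tokens = ['身', '高', 'is'] and
# chinese_word_set = {'身高'}, the longest match starting at position 0 is '身高',
# so the result is ['身', '##高', 'is'] -- the '##' prefix marks word-internal
# pieces for whole word masking.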
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_snake_case : List[str] = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
_snake_case : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 1_00] , tasks=['cws'] ).cws
_snake_case : Dict = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
_snake_case : Any = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
_snake_case : Union[str, Any] = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res['input_ids'] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
_snake_case : List[Any] = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
_snake_case : Dict = []
for id in input_ids:
_snake_case : Union[str, Any] = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
_snake_case : str = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
_snake_case : List[Any] = []
    # We only save the positions of Chinese subwords that start with '##', which means they are part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
_snake_case : Dict = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
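# Each entry of ref_ids lists, for one input line, the positions of BERT subword tokens
# that continue a Chinese word; this reference file is typically consumed by a
# whole-word-masking data collator so that whole words are masked together.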
def A__( __lowerCAmelCase ):
    # For Chinese (Ro)BERT models, the best results come from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # To fine-tune these models, we have to use the same segmentation tool: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_snake_case : Tuple = f.readlines()
_snake_case : Any = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_snake_case : str = LTP(args.ltp ) # faster in GPU device
_snake_case : int = BertTokenizer.from_pretrained(args.bert )
_snake_case : List[str] = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_snake_case : int = [json.dumps(__lowerCAmelCase ) + '\n' for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
lowercase_ : List[str] = parser.parse_args()
main(args)
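# Example invocation (script name and paths illustrative):
#   python prepare_chinese_ref.py --file_name ./data/zh_train.txt --ltp ./resources/ltp \
#       --bert ./resources/robert --save_path ./data/ref.txt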
| 701 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_snake_case : Tuple = 1
_snake_case : str = 3
_snake_case : List[str] = (32, 32)
_snake_case : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCamelCase_ )
return image
@property
def __UpperCAmelCase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
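        # stand-in feature extractor: returns an object exposing an empty pixel_values
        # tensor and a no-op .to(), just enough for the pipeline's safety-checker plumbing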
def extract(*lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : str ):
class lowercase :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
_snake_case : List[str] = torch.ones([0] )
def __UpperCAmelCase ( self : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
self.pixel_values.to(lowerCamelCase_ )
return self
return Out()
return extract
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , )
_snake_case : Union[str, Any] = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # assemble the pipeline with the DDIM scheduler configured above
_snake_case : Union[str, Any] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : str = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Optional[int] = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : Optional[int] = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[str] = self.dummy_cond_unet
_snake_case : List[str] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : int = self.dummy_vae
_snake_case : List[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
_snake_case : Any = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : Union[str, Any] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : str = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe([prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
_snake_case : Optional[Any] = output.images
_snake_case : Optional[Any] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=lowerCamelCase_ , )[0]
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
assert isinstance(pipe.scheduler , lowerCamelCase_ )
assert pipe.safety_checker is None
_snake_case : Dict = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase_ )
_snake_case : Any = StableDiffusionPipeline.from_pretrained(lowerCamelCase_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Union[str, Any] = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.dummy_cond_unet
_snake_case : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCamelCase_ )
_snake_case : Any = self.dummy_vae
_snake_case : Optional[Any] = self.dummy_text_encoder
_snake_case : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# put models in fp16
_snake_case : str = unet.half()
_snake_case : Union[str, Any] = vae.half()
_snake_case : Dict = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : List[str] = StableDiffusionPipeline(
unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , vae=lowerCamelCase_ , text_encoder=lowerCamelCase_ , tokenizer=lowerCamelCase_ , safety_checker=lowerCamelCase_ , feature_extractor=self.dummy_extractor , )
_snake_case : List[str] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : Optional[int] = sd_pipe([prompt] , num_inference_steps=2 , output_type='np' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Optional[int] = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : List[str] = 40_03_66_03_46
_snake_case : int = 7
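        # run the same prompt twice: first with safe latent diffusion disabled
        # (sld_guidance_scale=0), then with a strong SLD configuration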
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Union[str, Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Union[str, Any] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : str = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_snake_case : Tuple = torch.manual_seed(lowerCamelCase_ )
_snake_case : int = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : Tuple = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : List[Any] = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' , safety_checker=lowerCamelCase_ )
_snake_case : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : Union[str, Any] = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Optional[Any] = 27_34_97_17_55
_snake_case : Union[str, Any] = 7
_snake_case : Dict = torch.manual_seed(lowerCamelCase_ )
_snake_case : Tuple = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Any = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
_snake_case : Optional[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained('runwayml/stable-diffusion-v1-5' )
_snake_case : Optional[int] = sd_pipe.to(lowerCamelCase_ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_snake_case : List[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : Union[str, Any] = 10_44_35_52_34
_snake_case : Dict = 12
_snake_case : Optional[int] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Any = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=0 , )
_snake_case : Optional[int] = output.images
_snake_case : int = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
_snake_case : List[Any] = torch.manual_seed(lowerCamelCase_ )
_snake_case : Optional[int] = sd_pipe(
[prompt] , generator=lowerCamelCase_ , guidance_scale=lowerCamelCase_ , num_inference_steps=50 , output_type='np' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_snake_case : str = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 652 | 0 |