| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87-55.2k chars) | int64 (0-349) | string (135-49.1k chars) | int64 (0-349) | int64 (0-1) |
"""simple docstring"""
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
def count_of_possible_combinations(lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(lowercase )
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
def count_of_possible_combinations_with_dp_array(
lowercase , lowercase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
a =sum(
count_of_possible_combinations_with_dp_array(target - item , lowercase )
for item in array )
a =answer
return answer
a =[-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(lowercase , lowercase )
def _A ( lowercase , lowercase , lowercase ):
"""simple docstring"""
a =[0] * (target + 1)
a =1
for i in range(1 , target + 1 ):
for j in range(lowercase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase_ : List[Any] = 3
lowerCamelCase_ : List[Any] = 5
lowerCamelCase_ : Optional[int] = [1, 2, 5]
print(combination_sum_iv(n, array, target)) | 81 |
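    # Sanity check (illustrative): all three variants count the same quantity,
    # so they must agree; for array [1, 2, 5] and target 5 that count is 9
    # (f(t) = f(t-1) + f(t-2) + f(t-5) gives 1, 1, 2, 3, 5, 9 for t = 0..5).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )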
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
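# Usage sketch outside the test suite (illustrative; checkpoint name and model
# class are assumptions based on the MGP-STR docs):
#     from transformers import MgpstrForSceneTextRecognition
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     text = processor.batch_decode(outputs.logits)["generated_text"]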
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references this node carries."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward
        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level: keep flipping a p-biased coin, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node or None, update_vector of the leftmost predecessors per level)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))

    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))

    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
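    # Minimal usage sketch (illustrative): iteration always yields keys in
    # sorted order, regardless of insertion order.
    demo: SkipList[int, str] = SkipList()
    for key, value in [(3, "three"), (1, "one"), (2, "two")]:
        demo.insert(key, value)
    assert list(demo) == [1, 2, 3]
    assert demo.find(2) == "two"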
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
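    # Example (illustrative): at 60 degrees a polarizer transmits
    # cos^2(60 degrees) = 1/4 of the incident intensity.
    assert math.isclose(malus_law(100.0, 60.0), 25.0, rel_tol=1e-9)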
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
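# Usage sketch (illustrative; the checkpoint name is an assumption):
#     from transformers import CLIPSegProcessor
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")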
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
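# Usage sketch outside the test suite (checkpoint name as used above):
#     pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion")
#     out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75)
#     out.images[0].save("fantasy.png")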
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( enum.Enum ):
UpperCAmelCase_ :List[Any] = 0
UpperCAmelCase_ :int = 1
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = "generated"
def __init__( self , *__A , **__A ) -> int:
super().__init__(*__A , **__A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , **__A , ) -> str:
lowerCAmelCase_ :List[str] = {}
if truncation is not None:
lowerCAmelCase_ :List[Any] = truncation
lowerCAmelCase_ :Optional[int] = generate_kwargs
lowerCAmelCase_ :Optional[int] = {}
if return_tensors is not None and return_type is None:
lowerCAmelCase_ :Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowerCAmelCase_ :Any = return_type
if clean_up_tokenization_spaces is not None:
lowerCAmelCase_ :str = clean_up_tokenization_spaces
if stop_sequence is not None:
lowerCAmelCase_ :int = self.tokenizer.encode(__A , add_special_tokens=__A )
if len(__A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowerCAmelCase_ :Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
return True
def __lowerCAmelCase ( self , *__A , __A ) -> List[Any]:
lowerCAmelCase_ :List[str] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
lowerCAmelCase_ :Dict = ([prefix + arg for arg in args[0]],)
lowerCAmelCase_ :Optional[int] = True
elif isinstance(args[0] , __A ):
lowerCAmelCase_ :Any = (prefix + args[0],)
lowerCAmelCase_ :Optional[Any] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowerCAmelCase_ :List[str] = self.tokenizer(*__A , padding=__A , truncation=__A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *__A , **__A ) -> Optional[int]:
lowerCAmelCase_ :Any = super().__call__(*__A , **__A )
if (
isinstance(args[0] , __A )
and all(isinstance(__A , __A ) for el in args[0] )
and all(len(__A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def __lowerCAmelCase ( self , __A , __A=TruncationStrategy.DO_NOT_TRUNCATE , **__A ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self._parse_and_tokenize(__A , truncation=__A , **__A )
return inputs
def __lowerCAmelCase ( self , __A , **__A ) -> str:
if self.framework == "pt":
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = tf.shape(model_inputs["""input_ids"""] ).numpy()
lowerCAmelCase_ :Optional[Any] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
lowerCAmelCase_ :Union[str, Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
lowerCAmelCase_ :Optional[Any] = self.model.generate(**__A , **__A )
lowerCAmelCase_ :Optional[Any] = output_ids.shape[0]
if self.framework == "pt":
lowerCAmelCase_ :str = output_ids.reshape(__A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowerCAmelCase_ :Tuple = tf.reshape(__A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def __lowerCAmelCase ( self , __A , __A=ReturnType.TEXT , __A=False ) -> List[str]:
lowerCAmelCase_ :int = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowerCAmelCase_ :Optional[Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowerCAmelCase_ :str = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A , )
}
records.append(__A )
return records
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = "summary"
def __call__( self , *__A , **__A ) -> Tuple:
return super().__call__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(A__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[str] = "translation"
def __lowerCAmelCase ( self , __A , __A , __A ) -> Dict:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def __lowerCAmelCase ( self , *__A , __A=TruncationStrategy.DO_NOT_TRUNCATE , __A=None , __A=None ) -> Union[str, Any]:
if getattr(self.tokenizer , """_build_translation_inputs""" , __A ):
return self.tokenizer._build_translation_inputs(
*__A , return_tensors=self.framework , truncation=__A , src_lang=__A , tgt_lang=__A )
else:
return super()._parse_and_tokenize(*__A , truncation=__A )
def __lowerCAmelCase ( self , __A=None , __A=None , **__A ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = super()._sanitize_parameters(**__A )
if src_lang is not None:
lowerCAmelCase_ :List[str] = src_lang
if tgt_lang is not None:
lowerCAmelCase_ :Tuple = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowerCAmelCase_ :Dict = kwargs.get("""task""" , self.task )
lowerCAmelCase_ :Any = task.split("""_""" )
if task and len(__A ) == 4:
# translation, XX, to YY
lowerCAmelCase_ :Optional[int] = items[1]
lowerCAmelCase_ :Optional[Any] = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *__A , **__A ) -> str:
return super().__call__(*__A , **__A )
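# Usage sketch (illustrative checkpoint name; any seq2seq checkpoint works):
#     from transformers import pipeline
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     summarizer("A long article ...", max_length=60)[0]["summary_text"]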
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
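# Typical FileLock usage (illustrative): serialize access to a shared resource
# across processes; the lock is released when the `with` block exits.
#     with FileLock("/tmp/dataset.lock"):
#         ...  # critical section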
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
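# Usage sketch (illustrative; downloads the checkpoint on first use):
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("photo.jpg"), question="How many dogs are there?")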
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP | 86 |
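# Usage sketch (illustrative checkpoint names): the prior maps text to image
# embeddings, which the decoder pipeline then turns into an image.
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")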
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
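# Usage sketch (checkpoint name as used in the slow tests above):
#     image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#     encoding = image_processing(images=image, return_tensors="pt")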
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    # Grow the query size until the index returns enough candidates (or we hit a cap).
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as caption_file, open(
        f"{class_data_dir}/urls.txt", "w"
    ) as urls_file, open(f"{class_data_dir}/images.txt", "w") as paths_file:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    Image.open(BytesIO(img.content))  # raises if the payload is not a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    caption_file.write(images["caption"] + "\n")
                    urls_file.write(images["url"] + "\n")
                    paths_file.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
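# Example invocation (illustrative values, not from the original source):
#   python retrieve.py --class_prompt "photo of a cat" --class_data_dir ./real_reg/cat --num_class_images 200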
| 87 |
"""simple docstring"""
def compute_ap(graph):
    """Print the articulation points of an undirected graph given as an adjacency list."""
    n = len(graph)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # the root of a DFS tree is an articulation point iff it has more than one out-edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
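# Sanity check (worked out by hand, not part of the original source): removing
# vertex 2, 3 or 5 disconnects the sample graph above, so this call is expected
# to print 2, 3 and 5.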
| 335 | 0 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    '''Convert a TensorFlow T5 checkpoint to a PyTorch model.'''
    # Initialise the PyTorch model from the JSON config
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
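# Example invocation (paths and script name are illustrative only):
#   python convert_t5_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt --config_file ./t5/config.json --pytorch_dump_path ./t5-pytorch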
| 88 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
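# Note (assumed behaviour, inferred from the assertions above): `get_activation`
# maps a string name to a torch.nn activation module, and the tests rely on
# float32 sigmoid/softplus saturation, which is why each activation returns
# exactly 0 for sufficiently negative inputs.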
| 335 | 0 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased')
    dataset = load_dataset('glue', 'mrpc', split='validation')

    def tokenize_function(examples):
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='longest', return_tensors='pt')
        return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""


def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load('glue', 'mrpc')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['no']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['labels'])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['labels']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
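# This script is meant to be launched through the Accelerate CLI, e.g.
# (illustrative command, adjust the process count to your setup):
#   accelerate launch --num_processes 2 test_metrics.py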
| 89 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
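# Minimal usage sketch (hypothetical values, for illustration only):
#   config = RobertaConfig.from_pretrained("roberta-base", num_labels=2)
#   model = DeeRobertaForSequenceClassification(config)
#   loss, logits = model(input_ids, attention_mask=mask, labels=labels)[:2]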
| 335 | 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=3 , lowerCamelCase__=4 , lowerCamelCase__=None , ) -> Any:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self ) -> int:
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = BioGptModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) -> str:
'''simple docstring'''
__lowerCamelCase = BioGptForCausalLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = BioGptModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
# create attention mask
__lowerCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase__ )
__lowerCamelCase = self.seq_length // 2
__lowerCamelCase = 0
# first forward pass
__lowerCamelCase , __lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__lowerCamelCase = ids_tensor((1,) , lowerCamelCase__ ).item() + 1
__lowerCamelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__lowerCamelCase = random_other_next_tokens
# append to next input_ids and attn_mask
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase__ )] , dim=1 , )
# get two different outputs
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ )['last_hidden_state']
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -1, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = BioGptModel(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase__ )
# first forward pass
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , use_cache=lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowerCamelCase = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
__lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowerCamelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )['last_hidden_state']
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , past_key_values=lowerCamelCase__ )[
'last_hidden_state'
]
# select random slice
__lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 ) )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , lowerCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = BioGptForCausalLM(lowerCamelCase__ )
model.to(lowerCamelCase__ )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__lowerCamelCase = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def lowercase_ ( self , lowerCamelCase__ , *lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = BioGptModel(lowerCamelCase__ )
__lowerCamelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = BioGptForTokenClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):

    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = BioGptModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase__ )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase__ , gradient_checkpointing=lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase__ )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCamelCase__ )
__lowerCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowerCamelCase = 'left'
# Define PAD Token = EOS Token = 50256
__lowerCamelCase = tokenizer.eos_token
__lowerCamelCase = model.config.eos_token_id
# use different length sentences to test batching
__lowerCamelCase = [
'Hello, my dog is a little',
'Today, I',
]
__lowerCamelCase = tokenizer(lowerCamelCase__ , return_tensors='pt' , padding=lowerCamelCase__ )
__lowerCamelCase = inputs['input_ids'].to(lowerCamelCase__ )
__lowerCamelCase = model.generate(
input_ids=lowerCamelCase__ , attention_mask=inputs['attention_mask'].to(lowerCamelCase__ ) , )
__lowerCamelCase = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCamelCase__ )
__lowerCamelCase = model.generate(input_ids=lowerCamelCase__ )
__lowerCamelCase = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
__lowerCamelCase = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCamelCase__ )
__lowerCamelCase = model.generate(input_ids=lowerCamelCase__ , max_length=model.config.max_length - num_paddings )
__lowerCamelCase = tokenizer.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [non_padded_sentence, padded_sentence] )
@slow
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = BioGptModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = input_dict['input_ids']
__lowerCamelCase = input_ids.ne(1 ).to(lowerCamelCase__ )
__lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__lowerCamelCase = BioGptForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = 3
__lowerCamelCase = 'multi_label_classification'
__lowerCamelCase = input_dict['input_ids']
__lowerCamelCase = input_ids.ne(1 ).to(lowerCamelCase__ )
__lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__lowerCamelCase = BioGptForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , labels=lowerCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
__lowerCamelCase = torch.tensor([[2, 4_805, 9, 656, 21]] )
__lowerCamelCase = model(lowerCamelCase__ )[0]
__lowerCamelCase = 42_384
__lowerCamelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
__lowerCamelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCamelCase__ )
torch.manual_seed(0 )
__lowerCamelCase = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = model.generate(
**lowerCamelCase__ , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=lowerCamelCase__ , )
__lowerCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase__ )
__lowerCamelCase = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
| 90 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = """rougeLsum"""
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["""rouge1""", """rouge2""", """rougeL"""]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""], newline_sep=False)["""rougeLsum"""]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["""rougeLsum"""])["""rougeLsum"""]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("""examples/seq2seq/test_data/wmt_en_ro""")
    metrics = calculate_rouge_path(data_dir.joinpath("""test.source"""), data_dir.joinpath("""test.target"""))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("""test.source"""), data_dir.joinpath("""test.target"""), bootstrap_aggregation=False)
    assert isinstance(metrics_default_dict, defaultdict)
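# Note on the tests above: `newline_sep` joins sentences with "\n" before
# scoring, and rougeLsum is computed per sentence-split line. That is why the
# newline-separated variant scores higher on multi-sentence CNN/DM summaries
# while rouge1/rouge2/rougeL are unaffected.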
| 335 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    r"""
    Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('''You have to specify either images or text.''')

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop('''attention_mask''')
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop('''input_ids''')
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
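# Minimal usage sketch (checkpoint name is illustrative):
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#   inputs = processor(images=image, text="A caption", return_tensors="pt")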
| 91 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """
    A map of modules from TF to PyTorch. The TF-side key names below follow the
    standard TF-slim MobilenetV1 checkpoint layout (reconstructed, not verified
    against a checkpoint here).
    """

    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    # Each TF block maps to two PyTorch layers: a depthwise conv and a pointwise conv.
    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""")
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"""Loading TF weight {name} with shape {shape}""")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"""Importing {name}""")
        if name not in tf_weights:
            logger.info(f"""{name} not in tf pre-trained weights, skipping""")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("""Transposing""")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""")

        logger.info(f"""Initialize PyTorch weight {name} {array.shape}""")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + """/RMSProp""", None)
        tf_weights.pop(name + """/RMSProp_1""", None)
        tf_weights.pop(name + """/ExponentialMovingAverage""", None)

    logger.info(f"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, """constant""", 0.0)
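# Worked example of the "SAME" padding arithmetic above: a 3x3 kernel with
# stride 2 on a 224x224 input gives pad_along_height = pad_along_width = 1,
# split as (left, right, top, bottom) = (0, 1, 0, 1), matching TensorFlow.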
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""")
        if out_channels % groups != 0:
            raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="""zeros""",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class MobileNetVaModel(MobileNetVaPreTrainedModel):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
| 335 | 0 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCAmelCase = filter(lambda SCREAMING_SNAKE_CASE_ : p.requires_grad , model.parameters() )
__lowerCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCamelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu and em, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        """simple docstring"""
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        """simple docstring"""
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        """simple docstring"""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        """simple docstring"""
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
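# --- Illustrative aside (not part of the original script) -----------------------
# How these pieces might be wired together; `module` and `args` are hypothetical
# stand-ins for the LightningModule and parsed CLI arguments built upstream:
#
#     checkpoint = get_checkpoint_callback(args.output_dir, metric="rouge2")
#     early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
#     trainer = pl.Trainer(callbacks=[Seq2SeqLoggingCallback(), checkpoint, early_stop])
#     trainer.fit(module)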
| 92 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right cell, moving
    in the four cardinal directions and never entering a blocked (1) cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
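# Worked example (an added illustration, not from the original file): a 3x3 grid
# whose centre cell is blocked has exactly two simple paths from (0, 0) to (2, 2).
if __name__ == "__main__":
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # expected: 2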
| 335 | 0 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_00 , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=30 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=[0, 1, 2, 3] , ):
"""simple docstring"""
lowercase_ : Optional[int] = parent
lowercase_ : Tuple = 1_00
lowercase_ : Any = batch_size
lowercase_ : Union[str, Any] = image_size
lowercase_ : Dict = patch_size
lowercase_ : List[str] = num_channels
lowercase_ : str = is_training
lowercase_ : Any = use_labels
lowercase_ : Optional[int] = hidden_size
lowercase_ : List[Any] = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : Any = hidden_act
lowercase_ : List[Any] = hidden_dropout_prob
lowercase_ : Tuple = attention_probs_dropout_prob
lowercase_ : Dict = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : str = scope
lowercase_ : str = out_indices
lowercase_ : List[str] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase_ : Tuple = (image_size // patch_size) ** 2
lowercase_ : int = num_patches + 1
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[str] = None
lowercase_ : int = None
if self.use_labels:
lowercase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def _snake_case ( self ):
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[str] = BeitModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Any = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = BeitForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : int = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.type_sequence_label_size
lowercase_ : List[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ : Optional[int] = 1
lowercase_ : Optional[Any] = BeitForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Tuple = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Tuple = self.num_labels
lowercase_ : List[str] = BeitForSemanticSegmentation(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ : List[str] = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase_ : int = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Any = BeitModelTester(self )
lowercase_ : Dict = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def _snake_case ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Any = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : int = [*signature.parameters.keys()]
lowercase_ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]:
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : str = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Any = False
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__SCREAMING_SNAKE_CASE ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase_ : int = model_class(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(__SCREAMING_SNAKE_CASE )
model.train()
lowercase_ : Dict = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _snake_case ( self ):
"""simple docstring"""
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = _config_zero_init(__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(config=__SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Dict = BeitModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
"""simple docstring"""
lowercase_ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : int = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : int = self.default_image_processor
lowercase_ : Optional[int] = prepare_img()
lowercase_ : str = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values.to(__SCREAMING_SNAKE_CASE )
# prepare bool_masked_pos
lowercase_ : Union[str, Any] = torch.ones((1, 1_96) , dtype=torch.bool ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : str = model(pixel_values=__SCREAMING_SNAKE_CASE , bool_masked_pos=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = outputs.logits
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = torch.tensor(
[[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-2 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[Any] = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : Any = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.default_image_processor
lowercase_ : Any = prepare_img()
lowercase_ : Any = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = outputs.logits
# verify the logits
lowercase_ : Union[str, Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
lowercase_ : str = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __SCREAMING_SNAKE_CASE )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : Any = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Any = Image.open(ds[0]['''file'''] )
lowercase_ : Tuple = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : Union[str, Any] = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = outputs.logits
# verify the logits
lowercase_ : Any = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , __SCREAMING_SNAKE_CASE )
lowercase_ : int = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase_ : Dict = torch.tensor(
[
[[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]],
[[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]],
[[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]],
] , device=__SCREAMING_SNAKE_CASE , )
else:
lowercase_ : int = torch.tensor(
[
[[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]],
[[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]],
[[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]],
] , device=__SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase_ : str = model.to(__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = BeitImageProcessor(do_resize=__SCREAMING_SNAKE_CASE , size=6_40 , do_center_crop=__SCREAMING_SNAKE_CASE )
lowercase_ : int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ : Optional[int] = Image.open(ds[0]['''file'''] )
lowercase_ : Union[str, Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = outputs.logits.detach().cpu()
lowercase_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE , target_sizes=[(5_00, 3_00)] )
lowercase_ : List[Any] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , __SCREAMING_SNAKE_CASE )
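# --- Illustrative aside (not part of the test file) -----------------------------
# The inference path these integration tests exercise, sketched end to end
# (assumes Hub access; the local image path is a placeholder):
#
#     import torch
#     from PIL import Image
#     from transformers import BeitForImageClassification, BeitImageProcessor
#
#     processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
#     model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])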
| 93 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
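# --- Illustrative aside (not part of the original module) -----------------------
# The same deferred-import effect can be sketched with module-level __getattr__
# (PEP 562); the body below is for illustration only:
#
#     def __getattr__(name):
#         if name in _import_structure["modeling_megatron_bert"]:
#             from . import modeling_megatron_bert
#             return getattr(modeling_megatron_bert, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")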
| 335 | 0 |
def circle_sort(collection: list) -> list:
    """Sort a list in place with the recursive circle-sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(circle_sort(unsorted))
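# Quick sanity check (illustrative addition, not in the original): circle sort
# must agree with the built-in sorted() on arbitrary data.
#
#     import random
#     data = [random.randint(-100, 100) for _ in range(50)]
#     assert circle_sort(list(data)) == sorted(data)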
| 94 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
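# Illustrative reimplementation (an assumption about the semantics the tests above
# pin down, not the library's actual source): contiguous shard ranges per job,
# with any remainder spread over the first jobs.
def naive_distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    counts = [num_shards // max_num_jobs + (1 if i < num_shards % max_num_jobs else 0) for i in range(max_num_jobs)]
    out, start = [], 0
    for n in counts:
        if n:
            out.append(range(start, start + n))
        start += n
    return out


assert naive_distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]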
| 335 | 0 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorder a 32-char string from big- to little-endian word order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a 32-bit integer as a little-endian hex string (bytes)."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Convert a message to a bit string padded to a multiple of 512 chars."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list, None, None]:
    """Split the bit string into 512-char blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of `message` as a 32-char hex byte string."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67_452_301
    ba = 0xEF_CDA_B89
    ca = 0x98_BAD_CFE
    da = 0x10_325_476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_32(aa, a)
        ba = sum_32(ba, b)
        ca = sum_32(ca, c)
        da = sum_32(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
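# Cross-check (an added illustration mirroring the upstream doctest): the digest
# matches hashlib's reference implementation.
#
#     import hashlib
#     assert md5_me(b"Test String") == hashlib.md5(b"Test String").hexdigest().encode("utf-8")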
| 95 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a parsed Version) against a requirement string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Check the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
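# Usage sketch (assumes torch is installed in the running environment):
#
#     if is_torch_version(">=", "1.12.0"):
#         ...  # take the code path that needs a newer torch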
| 335 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = KandinskyInpaintPipeline
lowerCamelCase__ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
lowerCamelCase__ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
lowerCamelCase__ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
lowerCamelCase__ = False
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return 32
@property
def A_ ( self ):
return self.time_input_dim
@property
def A_ ( self ):
return self.time_input_dim * 4
@property
def A_ ( self ):
return 100
@property
def A_ ( self ):
_lowerCamelCase : Tuple = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCamelCase : str = MultilingualCLIP(lowercase )
_lowerCamelCase : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[str] = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**lowercase )
return model
@property
def A_ ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self ):
_lowerCamelCase : Optional[int] = self.dummy_text_encoder
_lowerCamelCase : Optional[Any] = self.dummy_tokenizer
_lowerCamelCase : Optional[int] = self.dummy_unet
_lowerCamelCase : Tuple = self.dummy_movq
_lowerCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowercase , set_alpha_to_one=lowercase , steps_offset=1 , prediction_type='epsilon' , thresholding=lowercase , )
_lowerCamelCase : List[str] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def A_ ( self , lowercase , lowercase=0 ):
_lowerCamelCase : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowercase )
# create init_image
_lowerCamelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowercase ) ).to(lowercase )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCamelCase : List[str] = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((256, 256) )
# create mask
_lowerCamelCase : Tuple = np.ones((64, 64) , dtype=np.floataa )
_lowerCamelCase : Optional[int] = 0
if str(lowercase ).startswith('mps' ):
_lowerCamelCase : List[Any] = torch.manual_seed(lowercase )
else:
_lowerCamelCase : List[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
_lowerCamelCase : str = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def A_ ( self ):
_lowerCamelCase : List[Any] = 'cpu'
_lowerCamelCase : Tuple = self.get_dummy_components()
_lowerCamelCase : List[str] = self.pipeline_class(**lowercase )
_lowerCamelCase : Optional[int] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : int = pipe(**self.get_dummy_inputs(lowercase ) )
_lowerCamelCase : List[Any] = output.images
_lowerCamelCase : Dict = pipe(
**self.get_dummy_inputs(lowercase ) , return_dict=lowercase , )[0]
_lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array(
[0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def A_ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self ):
_lowerCamelCase : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
_lowerCamelCase : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCamelCase : str = np.ones((768, 768) , dtype=np.floataa )
_lowerCamelCase : Any = 0
_lowerCamelCase : Any = 'a hat'
_lowerCamelCase : Optional[int] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(lowercase )
_lowerCamelCase : List[Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
_lowerCamelCase : Optional[int] = pipeline.to(lowercase )
pipeline.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = pipe_prior(
lowercase , generator=lowercase , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCamelCase : int = pipeline(
lowercase , image=lowercase , mask_image=lowercase , image_embeds=lowercase , negative_image_embeds=lowercase , generator=lowercase , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(lowercase , lowercase )
| 96 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        """simple docstring"""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 335 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_small_integration_test(self):
        '''simple docstring'''
        model = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''', return_tensors='''tf''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''tf''').input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 97 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeqaSeqTrainer(SeqaSeqTrainer):
    """simple docstring"""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset: Optional[Dataset] = None, eval_examples=None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", **gen_kwargs, ):
        """simple docstring"""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader, description="""Evaluation""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        """simple docstring"""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader, description="""Prediction""", prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, """predict""")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
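# --- Illustrative aside (not part of the trainer file) --------------------------
# Hypothetical wiring for the class above; every name on the right-hand side is a
# stand-in for objects built elsewhere in a training script:
#
#     trainer = QuestionAnsweringSeqaSeqTrainer(
#         model=model,
#         args=training_args,
#         train_dataset=train_dataset,
#         eval_dataset=eval_dataset,
#         eval_examples=eval_examples,
#         post_process_function=post_processing_function,
#         compute_metrics=compute_metrics,
#     )
#     metrics = trainer.evaluate(max_length=32, num_beams=4)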
| 335 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop('sampling_rate', None)
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
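# Usage sketch (assumes Hub access; the checkpoint name is a public CLAP
# checkpoint this processor is typically paired with, and `waveform` is a
# hypothetical 1-D float array of audio samples):
#
#     from transformers import ClapProcessor
#     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#     inputs = processor(text=["a dog barking"], audios=[waveform], sampling_rate=48000, return_tensors="pt")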
| 98 |
"""simple docstring"""
class Graph:
    """simple docstring"""

    def __init__(self):
        """simple docstring"""
        self.vertex = {}

    def print_graph(self):
        """simple docstring"""
        print(self.vertex)
        for i in self.vertex:
            print(i, """ -> """, """ -> """.join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        """simple docstring"""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """simple docstring"""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        """simple docstring"""
        visited[start_vertex] = True
        print(start_vertex, end=""" """)
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
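# A minimal iterative variant (a sketch added for illustration, not part of the
# original module): the same traversal with an explicit stack, which avoids
# Python's recursion limit on deep graphs.
def dfs_iterative(graph: Graph, start_vertex: int) -> None:
    visited = [False] * len(graph.vertex)
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if visited[vertex]:
            continue
        visited[vertex] = True
        print(vertex, end=" ")
        # push neighbours in reverse so they are visited in adjacency order
        for neighbour in reversed(graph.vertex.get(vertex, [])):
            if not visited[neighbour]:
                stack.append(neighbour)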
| 335 | 0 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ) -> Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs( self) -> Tuple:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size] , 2).float()
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config( self) -> List[Any]:
        '''simple docstring'''
        return FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[str]:
        '''simple docstring'''
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , lengths=input_lengths , langs=token_type_ids)
        result = model(input_ids , langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        '''simple docstring'''
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
        '''simple docstring'''
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        '''simple docstring'''
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
        result_with_labels = model(
            input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
        (total_loss ,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels)
        (total_loss ,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , ())
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
    def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[str]:
        '''simple docstring'''
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids , labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[Any]:
        '''simple docstring'''
        num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
        '''simple docstring'''
        num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common( self) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'lengths': input_lengths,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': FlaubertModel,
'''fill-mask''': FlaubertWithLMHeadModel,
'''question-answering''': FlaubertForQuestionAnsweringSimple,
'''text-classification''': FlaubertForSequenceClassification,
'''token-classification''': FlaubertForTokenClassification,
'''zero-shot''': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name) -> Optional[int]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False) -> int:
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
        return inputs_dict
    def setUp( self) -> Tuple:
        '''simple docstring'''
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37)
    def test_config( self) -> Any:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_flaubert_model( self) -> int:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
    def test_flaubert_lm_head( self) -> Tuple:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
    def test_flaubert_simple_qa( self) -> List[str]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
    def test_flaubert_qa( self) -> Dict:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
    def test_flaubert_sequence_classif( self) -> Union[str, Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
    def test_flaubert_token_classif( self) -> Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
    def test_flaubert_multiple_choice( self) -> Any:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained( self) -> Optional[Any]:
        '''simple docstring'''
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change( self) -> List[Any]:
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp , 'traced_model.pt') , map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device) , inputs_dict['attention_mask'].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_inference_no_head_absolute_embedding( self) -> Optional[int]:
        '''simple docstring'''
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
| 99 |
"""simple docstring"""
def solution(n: int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError("""Invalid input""" )
    # work modulo 10**n so only the last n digits of 28433 * 2**7830457 + 1 are kept
    modulus = 10**n
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCamelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards( kwargs , expected ):
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs( gen_kwargs , max_num_jobs , expected ):
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs( gen_kwargs , expected ):
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
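# A minimal reference sketch of the contiguous-range split that the parametrized
# cases above describe (an illustration inferred from the expected values, not the
# actual implementation in `datasets.utils.sharding`):
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list:
    shard_ranges = []
    for group_idx in range(max_num_jobs):
        # earlier groups absorb the remainder, one extra shard each
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < num_shards % max_num_jobs)
        if num_shards_to_add == 0:
            break
        start = shard_ranges[-1].stop if shard_ranges else 0
        shard_ranges.append(range(start, start + num_shards_to_add))
    return shard_ranges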
| 100 |
"""simple docstring"""
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector(end_pointa: Pointad , end_pointb: Pointad ) -> Vectorad:
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross(ab: Vectorad , ac: Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vectorad , accuracy: int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear(point_a: Pointad , point_b: Pointad , point_c: Pointad , accuracy: int = 10 ) -> bool:
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
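# A minimal self-check (sample points chosen for illustration): the first three
# points sit on the line x = y = z, so their cross product is the zero vector;
# the second set does not.
if __name__ == "__main__":
    assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
    assert not are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))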
| 335 | 0 |
from __future__ import annotations
from random import random
class Node:
    def __init__( self ,value = None):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
    def __repr__( self):
        from pprint import pformat
        if self.left is None and self.right is None:
            return f'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1)
    def __str__( self):
        value = str(self.value) + ''' '''
        left = str(self.left or '''''')
        right = str(self.right or '''''')
        return value + left + right
def split(root , value ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left , root.left = split(root.left , value )
            return left, root
        else:
            root.right , right = split(root.right , value )
            return root, right
def merge(left , right ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root , value ):
    '''simple docstring'''
    node = Node(value )
    left , right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root , value ):
    '''simple docstring'''
    left , right = split(root , value - 1 )
    _ , right = split(right , value )
    return merge(left , right )
def inorder(root ):
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )
def interact_treap(root , args ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root
def main():
    '''simple docstring'''
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good by!''' )
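# A minimal non-interactive sketch (values are illustrative): builds a treap from
# 1, 3, 5, 7, erases 5, and prints the inorder walk. Call demo() manually to try
# it; it is not wired into main() above.
def demo() -> None:
    root = None
    for value in (1, 3, 5, 7):
        root = insert(root, value)
    root = erase(root, 5)
    inorder(root)  # expected output: 1,3,7,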
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 101 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_pegasus_x'] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 0 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer: Optional[Any] , key: List[str] , value: List[str] , full_name: Any , weight_type: Any ) ->Tuple:
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model: Tuple , hf_model: Optional[int] ) ->Tuple:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name: Optional[int] , value: Tuple , feature_extractor: Tuple , unused_weights: Union[str, Any] , use_group_norm: Union[str, Any] ) ->Tuple:
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path: Optional[int] , pytorch_dump_folder_path: Optional[int] , config_path: str=None ) ->str:
    """simple docstring"""
    # load the pre-trained checkpoint and rebuild the original fairseq model
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['''cfg'''] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
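# Example invocation (the script and checkpoint names below are placeholders):
# python convert_wavlm_checkpoint.py \
#     --checkpoint_path /path/to/WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted
# Pass --config_path only when a hand-written config.json should override the
# default WavLMConfig derived above.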
| 102 |
"""simple docstring"""
import math
class Graph:
    """simple docstring"""
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
        """simple docstring"""
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
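    # With the edges above, the all-pairs result gives show_min(1, 4) == 11
    # (via 1 -> 3 -> 4) and show_min(0, 3) == 16 (via 0 -> 2 -> 3).
    # Floyd-Warshall runs in O(n^3) time and O(n^2) space for n vertices.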
| 335 | 0 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    def __init__( self , id_ ):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}
    def __lt__( self , other ):
        return self.key < other.key
    def __repr__( self ):
        return self.id
    def add_neighbor( self , vertex ):
        self.neighbors.append(vertex)
    def add_edge( self , vertex , weight ):
        self.edges[vertex.id] = weight
def connect(graph: list ,a: int ,b: int ,edge: int ):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] ,edge )
    graph[b - 1].add_edge(graph[a - 1] ,edge )
def prim(graph: list ,root: Vertex ):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q )
        q.remove(u )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1 ,len(graph ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a
def prim_heap(graph: list ,root: Vertex ):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph )
    hq.heapify(h )
    while h:
        u = hq.heappop(h )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h )
    for i in range(1 ,len(graph ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def UpperCamelCase( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
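# A minimal usage sketch (hypothetical 3-vertex graph; the weights are
# illustrative). connect() takes 1-based endpoints into the 0-indexed list.
if __name__ == "__main__":
    demo_graph = [Vertex(n) for n in range(3)]
    connect(demo_graph, 1, 2, 4)
    connect(demo_graph, 2, 3, 1)
    connect(demo_graph, 1, 3, 3)
    print(prim(demo_graph, demo_graph[0]))  # expected: [(2, 3), (3, 1)]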
| 103 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest( unittest.TestCase ):
    """simple docstring"""
    image_processor_class = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def setUp( self: List[Any] ):
        """simple docstring"""
        self.image_size = (3, 32, 1_28)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        image_processor_map = {
            """do_normalize""": False,
            """do_resize""": True,
            """image_processor_type""": """ViTImageProcessor""",
            """resample""": 3,
            """size""": {"""height""": 32, """width""": 1_28},
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self: List[str] , **kwargs: Union[str, Any] ):
        """simple docstring"""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self: List[Any] , **kwargs: str ):
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self: Optional[Any] ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self: Union[str, Any] ):
        """simple docstring"""
        image_input = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )
        image_input = Image.fromarray(np.moveaxis(image_input , 0 , -1 ) )
        return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    """simple docstring"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase ):
"""simple docstring"""
    def __init__( self ,parent ,batch_size=7 ,min_seq_length=4_0_0 ,max_seq_length=2_0_0_0 ,feature_size=1_0 ,hop_length=1_6_0 ,chunk_length=8 ,padding_value=0.0 ,sampling_rate=4_0_0_0 ,return_attention_mask=False ,do_normalize=True ,):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self ,equal_length=False ,numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = feat_extract_first.save_pretrained(lowercase__ )[0]
check_json_file_has_correct_format(lowercase__ )
__lowercase = self.feature_extraction_class.from_pretrained(lowercase__ )
__lowercase = feat_extract_first.to_dict()
__lowercase = feat_extract_second.to_dict()
__lowercase = feat_extract_first.mel_filters
__lowercase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ) )
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowercase = os.path.join(lowercase__ ,'''feat_extract.json''' )
feat_extract_first.to_json_file(lowercase__ )
__lowercase = self.feature_extraction_class.from_json_file(lowercase__ )
__lowercase = feat_extract_first.to_dict()
__lowercase = feat_extract_second.to_dict()
__lowercase = feat_extract_first.mel_filters
__lowercase = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ) )
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 ,1_4_0_0 ,2_0_0 )]
__lowercase = [np.asarray(lowercase__ ) for speech_input in speech_inputs]
# Test feature size
__lowercase = feature_extractor(lowercase__ ,padding='''max_length''' ,return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__lowercase = feature_extractor(speech_inputs[0] ,return_tensors='''np''' ).input_features
__lowercase = feature_extractor(np_speech_inputs[0] ,return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) )
# Test batched
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase__ ,lowercase__ ):
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__lowercase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowercase = np.asarray(lowercase__ )
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase__ ,lowercase__ ):
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) )
# Test truncation required
__lowercase = [floats_list((1, x) )[0] for x in range(2_0_0 ,(feature_extractor.n_samples + 5_0_0) ,2_0_0 )]
__lowercase = [np.asarray(lowercase__ ) for speech_input in speech_inputs]
__lowercase = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowercase = [np.asarray(lowercase__ ) for speech_input in speech_inputs_truncated]
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
__lowercase = feature_extractor(lowercase__ ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase__ ,lowercase__ ):
self.assertTrue(np.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import torch
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = np.random.rand(1_0_0 ,3_2 ).astype(np.floataa )
__lowercase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowercase = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''np''' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__lowercase = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''pt''' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples( self ,num_samples ):
        ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' ,'''clean''' ,split='''validation''' )
        # automatic decoding with librispeech
        speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE ( self : str ):
# fmt: off
__lowercase = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
__lowercase = self._load_datasamples(1 )
__lowercase = WhisperFeatureExtractor()
__lowercase = feature_extractor(lowercase__ ,return_tensors='''pt''' ).input_features
self.assertEqual(input_features.shape ,(1, 8_0, 3_0_0_0) )
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] ,lowercase__ ,atol=1e-4 ) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowercase = self._load_datasamples(1 )[0]
__lowercase = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
__lowercase = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=lowercase__ )[0]
self.assertTrue(np.all(np.mean(lowercase__ ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowercase__ ) - 1 ) < 1e-3 ) )
| 104 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float , angle: float ) -> float:
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
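# A quick numeric check (values chosen for illustration): an ideal polarizer at
# 60 degrees passes cos^2(60 deg) = 1/4 of the incident intensity.
if __name__ == "__main__":
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9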
| 335 | 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def load_vocab_and_emoji(vocab_file: Dict , emoji_file: Tuple ) ->List[Any]:
    '''Loads a vocabulary file and an emoji file into dictionaries.'''
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[idx] = ",".join(b )
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ) -> int:
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ) -> Optional[int]:
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ) -> Union[str, Any]:
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ) -> Dict:
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ) -> str:
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ) -> Optional[int]:
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ) -> str:
        out_string = "".join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file , "w" , encoding="utf-8" ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!" )
                    index = token_index
                writer.write(",".join(token ) + "\n" )
                index += 1
        with open(emoji_file , "w" , encoding="utf-8" ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object ):
    def __init__( self , vocab , ids_to_tokens , emoji ) -> Dict:
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self ) -> Union[str, Any]:
return len(self.ids_to_tokens )
    def clean_text( self , content ) -> Any:
        content = self.content_repatter1.sub("<URL>" , content )
        content = self.content_repatter2.sub("<EMAIL>" , content )
        content = self.content_repatter3.sub("<TEL>" , content )
        content = self.content_repatter4.sub("<DATE>" , content )
        content = self.content_repatter5.sub("<DATE>" , content )
        content = self.content_repatter6.sub("<PRICE>" , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace(" " , "<SP>" )
        text = text.replace("　" , "<SP>" )  # full-width (ideographic) space
        text = text.replace("\r\n" , "<BR>" )
        text = text.replace("\n" , "<BR>" )
        text = text.replace("\r" , "<BR>" )
        text = text.replace("\t" , "<TAB>" )
        text = text.replace("—" , "ー" )
        text = text.replace("−" , "ー" )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe28080 and c <= 0xe2b07f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append("<KIGOU>" )
                elif checkuae(wd ):
                    result.append("<U2000U2BFF>" )
                else:
                    for i in wd.encode("utf-8" ):
                        result.append("<|byte%d|>" % i )
                pos = end
        return result
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__="\n" ) -> str:
a : Union[str, Any] = []
a : Union[str, Any] = []
a : int = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" , errors="replace" ) )
a : Union[str, Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" , errors="replace" ) )
a : Optional[Any] = "".join(lowerCAmelCase__ )
return text
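
# A minimal, self-contained sketch of the byte-fallback scheme used above: characters
# that miss the vocabulary are emitted as "<|byte%d|>" tokens and reassembled from
# their UTF-8 bytes at decode time. The helper names here are illustrative only.
def _bytes_to_tokens(text ):
    return ["<|byte%d|>" % b for b in text.encode("utf-8" )]

def _tokens_to_text(tokens ):
    payload = bytearray(int(t[6:-2] ) for t in tokens if t[:6] == "<|byte" and t[-2:] == "|>" )
    return payload.decode("utf-8" , errors="replace" )

assert _tokens_to_text(_bytes_to_tokens("日本語" ) ) == "日本語"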
| 105 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
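
# The numeric-regression pattern used throughout these tests, in isolation: compare
# a small corner slice of the generated image against hand-recorded reference values
# with an absolute tolerance, so minor kernel-level noise does not fail the test.
_demo_image = np.zeros((1, 32, 32, 3) )
_demo_slice = _demo_image[0, -3:, -3:, -1].flatten()
_demo_expected = np.zeros(9 )  # the real tests hard-code a measured array here
assert np.abs(_demo_slice - _demo_expected ).max() < 5e-3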
| 335 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__UpperCamelCase : List[str] = '''
Human: <<task>>
Assistant: '''
__UpperCamelCase : Union[str, Any] = '''huggingface-tools/default-prompts'''
__UpperCamelCase : int = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
def __SCREAMING_SNAKE_CASE ( prompt_or_repo_id , agent_name , mode="run" ):
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
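
# The dispatch rule above, demonstrated on its own: anything containing whitespace
# is treated as a literal prompt and returned verbatim; anything else is assumed to
# be a Hub repo id whose template file must be fetched.
assert re.search('''\\s''' , '''Translate this sentence''' ) is not None   # literal prompt
assert re.search('''\\s''' , '''huggingface-tools/default-prompts''' ) is None  # repo id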
| 106 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
    assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
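
# Typical FileLock behaviour these tests pin down, sketched standalone: a second
# FileLock object on the same path times out while the first holder keeps the lock.
# "demo.lock" is a throwaway path used only for illustration.
def _demo_lock(path="demo.lock" ):
    lock = FileLock(path )
    with lock.acquire():
        try:
            FileLock(path ).acquire(timeout=0.01 )
        except Timeout:
            return True  # expected: the lock is already held elsewhere
    return False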
| 335 | 0 |
from bisect import bisect
from itertools import accumulate
def __magic_name__ ( vl : list, wt : list, w : int, n : int ):
    '''simple docstring'''
    r = sorted(zip(vl, wt ), key=lambda x : x[0] / x[1], reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc, w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
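
# Illustrative call, assuming the four arguments are (values, weights, capacity, n):
# items are sorted by value density, taken whole while they fit, and the first item
# that no longer fits is taken fractionally (here 2/3 of the item worth 120).
assert __magic_name__([60, 100, 120], [10, 20, 30], 50, 3 ) == 240.0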
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self , inputs ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self , outputs ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
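
# What the tool does under the hood, sketched directly against the checkpoint it
# names: argmax over the VQA logits, mapped through id2label. The image path and
# question are placeholders; running this downloads the ViLT weights.
def _demo(image_path="photo.jpg" , question="What color is the car?" ):
    from PIL import Image
    processor = AutoProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa" )
    model = AutoModelForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa" )
    inputs = processor(Image.open(image_path ) , question , return_tensors="pt" )
    with torch.no_grad():
        idx = model(**inputs ).logits.argmax(-1 ).item()
    return model.config.id2label[idx]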
| 335 | 0 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
def get_citation( url : str , params : dict ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
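
# The selector logic above, checked against a static snippet so it runs offline
# (the markup mirrors the Google Scholar result layout the function expects):
_html = '<div class="gs_ri"><div class="gs_fl"><a>x</a><a>y</a><a>Cited by 42</a></div></div>'
_soup = BeautifulSoup(_html , "html.parser" )
_anchors = _soup.find("div" , attrs={"class": "gs_ri"} ).find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
assert _anchors[2].get_text() == "Cited by 42"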
if __name__ == "__main__":
lowerCAmelCase__ = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 108 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        """simple docstring"""
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A: Optional[Any] = logging.get_logger(__name__)
A: Tuple = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Optional[Any] = 'data2vec-vision'
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
'''simple docstring'''
return 1E-4
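
# Minimal instantiation sketch for the config defined above. The public names in
# transformers for this pair are Data2VecVisionConfig / Data2VecVisionModel; stated
# as an assumption here since this file only defines the config class itself.
# from transformers import Data2VecVisionConfig, Data2VecVisionModel
# config = Data2VecVisionConfig(hidden_size=768, num_hidden_layers=12)
# model = Data2VecVisionModel(config)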
| 109 |
"""simple docstring"""
def compute_ap ( l : dict ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root : int , at : int , parent : int , out_edge_count : int ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
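
# For the sample graph above, the articulation points printed are 2, 3 and 5:
# removing 2 separates the {0, 1} triangle from the rest, removing 3 isolates 4,
# and removing 5 cuts the 5-6-7-8 cycle off the graph.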
| 335 | 0 |
"""simple docstring"""
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def str_eval( s : str ) -> int:
    product = 1
    for digit in s:
        product *= int(digit )
    return product
def solution( n : str = N ) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n ) - 13:
        if int(n[cur_index] ) >= int(substr[0] ):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product , str_eval(substr ) )
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
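
# The same sliding-window idea on a short input, kept independent of the
# 1000-digit constant above (window length 5 instead of 13):
def _greatest_product(digits : str , k : int ) -> int:
    best = 0
    for i in range(len(digits ) - k + 1 ):
        p = 1
        for d in digits[i : i + k]:
            p *= int(d )
        best = max(best , p )
    return best

assert _greatest_product("3675356291" , 5 ) == 3150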
if __name__ == "__main__":
print(F'''{solution() = }''')
| 183 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
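
# Direct use of the helper under test: `get_activation` maps a string key to the
# corresponding torch.nn activation module instance.
_act = get_activation("""gelu""" )
print(_act(torch.tensor([0.0, 20.0] ) ) )  # GELU(0) == 0 and GELU(20) is ~20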
| 335 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case__ ( self : List[str] ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def snake_case__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=a_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Dict = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(a_ )
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet_upscale
__UpperCAmelCase : Union[str, Any] = DDPMScheduler()
__UpperCAmelCase : Dict = DDIMScheduler(prediction_type='''v_prediction''' )
__UpperCAmelCase : Any = self.dummy_vae
__UpperCAmelCase : str = self.dummy_text_encoder
__UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : Any = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : str = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__UpperCAmelCase : Tuple = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__UpperCAmelCase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__UpperCAmelCase : Optional[Any] = torch.Generator(device=a_ ).manual_seed(0 )
__UpperCAmelCase : List[str] = sd_pipe(
[prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : Optional[int] = output.images
__UpperCAmelCase : Tuple = torch.Generator(device=a_ ).manual_seed(0 )
__UpperCAmelCase : Dict = sd_pipe(
[prompt] , image=a_ , generator=a_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=a_ , )[0]
__UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
__UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
__UpperCAmelCase : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__UpperCAmelCase : str = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[str] = self.dummy_cond_unet_upscale
__UpperCAmelCase : Tuple = DDPMScheduler()
__UpperCAmelCase : List[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
__UpperCAmelCase : Union[str, Any] = self.dummy_vae
__UpperCAmelCase : str = self.dummy_text_encoder
__UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : Dict = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__UpperCAmelCase : Union[str, Any] = '''A painting of a squirrel eating a burger'''
__UpperCAmelCase : int = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : Any = output.images
assert image.shape[0] == 2
__UpperCAmelCase : Optional[int] = torch.Generator(device=a_ ).manual_seed(0 )
__UpperCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=a_ , generator=a_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
__UpperCAmelCase : List[Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def snake_case__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.dummy_cond_unet_upscale
__UpperCAmelCase : List[Any] = DDPMScheduler()
__UpperCAmelCase : int = DDIMScheduler(prediction_type='''v_prediction''' )
__UpperCAmelCase : Union[str, Any] = self.dummy_vae
__UpperCAmelCase : List[Any] = self.dummy_text_encoder
__UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__UpperCAmelCase : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase : List[str] = Image.fromarray(np.uinta(a_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__UpperCAmelCase : List[str] = unet.half()
__UpperCAmelCase : str = text_encoder.half()
# make sure here that pndm scheduler skips prk
__UpperCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=a_ , low_res_scheduler=a_ , scheduler=a_ , vae=a_ , text_encoder=a_ , tokenizer=a_ , max_noise_level=3_50 , )
__UpperCAmelCase : Optional[int] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__UpperCAmelCase : int = '''A painting of a squirrel eating a burger'''
__UpperCAmelCase : List[str] = torch.manual_seed(0 )
__UpperCAmelCase : str = sd_pipe(
[prompt] , image=a_ , generator=a_ , num_inference_steps=2 , output_type='''np''' , ).images
__UpperCAmelCase : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__UpperCAmelCase : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
__UpperCAmelCase : str = '''stabilityai/stable-diffusion-x4-upscaler'''
__UpperCAmelCase : str = StableDiffusionUpscalePipeline.from_pretrained(a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__UpperCAmelCase : int = '''a cat sitting on a park bench'''
__UpperCAmelCase : str = torch.manual_seed(0 )
__UpperCAmelCase : List[str] = pipe(
prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , )
__UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def snake_case__ ( self : int ):
'''simple docstring'''
__UpperCAmelCase : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__UpperCAmelCase : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
__UpperCAmelCase : Optional[Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
__UpperCAmelCase : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__UpperCAmelCase : Tuple = '''a cat sitting on a park bench'''
__UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
__UpperCAmelCase : Tuple = pipe(
prompt=a_ , image=a_ , generator=a_ , output_type='''np''' , )
__UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def snake_case__ ( self : int ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCAmelCase : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
__UpperCAmelCase : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
__UpperCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__UpperCAmelCase : Union[str, Any] = '''a cat sitting on a park bench'''
__UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
__UpperCAmelCase : List[str] = pipe(
prompt=a_ , image=a_ , generator=a_ , num_inference_steps=5 , output_type='''np''' , )
__UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
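
# Hedged end-user sketch of the pipeline exercised above: weights download on
# first use, and "low_res.png" is a placeholder input path, not a test fixture.
def _demo_upscale(path="low_res.png" , prompt="a cat sitting on a park bench" ):
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler" , torch_dtype=torch.float16 )
    pipe.to("cuda" )
    low_res = Image.open(path ).convert("RGB" )
    return pipe(prompt=prompt , image=low_res ).images[0]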
| 226 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", ROBERTA_START_DOCSTRING, )
class DeeRobertaModel( DeeBertModel ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.embeddings = RobertaEmbeddings(config )
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", ROBERTA_START_DOCSTRING, )
class DeeRobertaForSequenceClassification( BertPreTrainedModel ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
    def __init__( self , config ):
        """simple docstring"""
        super().__init__(config )
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , self.config.num_labels )
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING )
    def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , labels=None , output_layer=-1 , train_highway=False , ):
        """simple docstring"""
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output )
            logits = self.classifier(pooled_output )
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits )
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(highway_loss )
            if train_highway:
                outputs = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
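
# The confidence signal the highway exits key off, shown in isolation: prediction
# entropy over a layer's logits (the imported `entropy` helper computes the same
# quantity). Low entropy means the layer is confident enough to exit early.
import torch

def _entropy_of_logits(logits ):
    p = torch.softmax(logits , dim=-1 )
    return -(p * torch.log(p + 1e-12 ) ).sum(dim=-1 )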
| 335 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
SCREAMING_SNAKE_CASE :Optional[Any] = 'hf-internal-testing/tiny-random-bert'
SCREAMING_SNAKE_CASE :int = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
SCREAMING_SNAKE_CASE :int = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Any ):
__A = cached_file(A ,A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A ,A ) ) )
with open(os.path.join(A ,"refs" ,"main" ) ) as f:
__A = f.read()
self.assertEqual(A ,os.path.join(A ,"snapshots" ,A ,A ) )
self.assertTrue(os.path.isfile(A ) )
# File is cached at the same place the second time.
__A = cached_file(A ,A )
self.assertEqual(A ,A )
# Using a specific revision to test the full commit hash.
__A = cached_file(A ,A ,revision="9b8c223" )
self.assertEqual(A ,os.path.join(A ,"snapshots" ,A ,A ) )
def UpperCamelCase_ ( self : str ):
with self.assertRaisesRegex(A ,"is not a valid model identifier" ):
__A = cached_file("tiny-random-bert" ,A )
with self.assertRaisesRegex(A ,"is not a valid git identifier" ):
__A = cached_file(A ,A ,revision="aaaa" )
with self.assertRaisesRegex(A ,"does not appear to have a file named" ):
__A = cached_file(A ,"conf" )
def UpperCamelCase_ ( self : Tuple ):
with self.assertRaisesRegex(A ,"does not appear to have a file named" ):
__A = cached_file(A ,"conf" )
with open(os.path.join(A ,"refs" ,"main" ) ) as f:
__A = f.read()
self.assertTrue(os.path.isfile(os.path.join(A ,".no_exist" ,A ,"conf" ) ) )
__A = cached_file(A ,"conf" ,_raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
__A = cached_file(A ,"conf" ,local_files_only=A ,_raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
__A = mock.Mock()
__A = 5_00
__A = {}
__A = HTTPError
__A = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" ,return_value=A ) as mock_head:
__A = cached_file(A ,"conf" ,_raise_exceptions_for_connection_errors=A )
self.assertIsNone(A )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase_ ( self : int ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" ,A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" ,A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" ,A ) )
def UpperCamelCase_ ( self : Dict ):
self.assertIsNone(get_file_from_repo("bert-base-cased" ,"ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A ,"is not a valid model identifier" ):
get_file_from_repo("bert-base-case" ,A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A ,"is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" ,A ,revision="ahaha" )
__A = get_file_from_repo("bert-base-cased" ,A )
# The name is the cached name which is not very easy to test, so instead we load the content.
__A = json.loads(open(A ,"r" ).read() )
self.assertEqual(config["hidden_size"] ,7_68 )
def UpperCamelCase_ ( self : int ):
with tempfile.TemporaryDirectory() as tmp_dir:
__A = Path(A ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(A ,"a.txt" ) ,str(A ) )
self.assertIsNone(get_file_from_repo(A ,"b.txt" ) )
| 15 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
    tgt = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
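# Minimal usage sketch for the local `calculate_rouge` helper the tests above rely on
# (the aggregated return shape — a dict keyed by rouge type — is an assumption):
# >>> calculate_rouge(["hello there general kenobi"], ["hello there"], rouge_keys=["rouge2"])  # doctest: +SKIP
# {'rouge2': ...}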
def test_pegasus_newline():
    pred = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
    tgt = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
| 335 | 0 |
import argparse
import os
import re
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line: str) -> str:
    """Returns the indent in a line."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level` and optionally bounded by the prompts."""
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    """Wraps a key function (mapping an object to a str) to lowercase the result and drop underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""
    def noop(x):
        return x

    # If no key is provided, we default to noop.
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
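# Quick illustration of the isort-style ordering above (constants first, then classes,
# then functions, each sorted case-insensitively while ignoring underscores):
# >>> sort_objects(["load_model", "MODEL_MAPPING", "AutoModel", "_LazyModule"])
# ['MODEL_MAPPING', 'AutoModel', '_LazyModule', 'load_model']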
def sort_objects_in_import(import_statement: str) -> str:
    """Returns the same `import_statement` but with its imported objects sorted."""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
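# Example of the one-line path (sketch; the key name is illustrative):
# >>> sort_objects_in_import('_import_structure["models"] = ["ResnetModel", "CONFIG", "load_weights"]')
# '_import_structure["models"] = ["CONFIG", "ResnetModel", "load_weights"]'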
def sort_imports(file, check_only=True):
    """Sort `_import_structure` entries in `file`; with `check_only=True`, return True when a rewrite is needed."""
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under PATH_TO_DIFFUSERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 227 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch. The TF variable names below follow the layout of the
    original MobilenetV1 TF-Slim checkpoints."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
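# Worked example of the "SAME" padding computed above (sketch): for a 7x7 input with a
# 3x3 kernel and stride 2, in_height % stride == 1, so pad_along_height = 3 - 1 = 2,
# split as top=1 / bottom=1 (and likewise along the width), giving a 9x9 padded input.
# >>> conv = nn.Conv2d(3, 8, kernel_size=3, stride=2)
# >>> apply_tf_padding(torch.zeros(1, 3, 7, 7), conv).shape
# torch.Size([1, 3, 9, 9])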
class MobileNetVaConvLayer(nn.Module):
    def __init__(self, config: MobileNetVaConfig, in_channels: int, out_channels: int, kernel_size: int, stride: Optional[int] = 1, groups: Optional[int] = 1, bias: bool = False, use_normalization: Optional[bool] = True, use_activation: Optional[Union[bool, str]] = True):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=bias, padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels, eps=config.layer_norm_eps, momentum=0.9997, affine=True, track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=in_channels, kernel_size=3, stride=strides[i], groups=in_channels,
                )
            )
            self.layer.append(
                MobileNetVaConvLayer(
                    config, in_channels=in_channels, out_channels=out_channels, kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states,
        )
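# Usage sketch for the backbone above (mirrors the doc-sample constants; `image` is any
# PIL image and the calls are illustrative, not part of this module):
# >>> from transformers import AutoImageProcessor
# >>> processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")  # doctest: +SKIP
# >>> model = MobileNetVaModel.from_pretrained("google/mobilenet_v1_1.0_224")  # doctest: +SKIP
# >>> outputs = model(**processor(images=image, return_tensors="pt"))  # doctest: +SKIP
# >>> list(outputs.last_hidden_state.shape)  # matches _EXPECTED_OUTPUT_SHAPE
# [1, 1024, 7, 7]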
@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 335 | 0 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Converts a 3d point to a 2d drawable point using a simple perspective projection."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values())}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
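# Worked example (sketch): with distance=10 and scale=10, a point at z=10 has a
# projection factor of distance / (z + distance) = 0.5, then is scaled by 10:
# >>> convert_to_ad(1.0, 2.0, 10.0, 10.0, 10.0)
# (5.0, 10.0)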
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a point around one of the coordinate axes by the given angle."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f"{list(input_variables.values())}"
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'')
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{convert_to_ad(1.0, 2.0, 3.0, 1_0.0, 1_0.0) = }''')
print(f'''{rotate(1.0, 2.0, 3.0, 'y', 9_0.0) = }''')
| 90 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count self-avoiding paths from (row, col) to the bottom-right cell; 1 marks a blocked cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
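# Example (sketch): a fully open 2x2 grid has exactly two self-avoiding paths
# (right-then-down and down-then-right) from the top-left to the bottom-right.
# >>> depth_first_search([[0, 0], [0, 0]], 0, 0, set())
# 2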
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 0 |
"""simple docstring"""
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
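# Shape-level sketch of the reordering above: the head/split axes are permuted while
# the overall tensor shape is preserved (the final view restores `input_shape`).
# >>> w = torch.zeros(2 * 3 * 4, 7)  # num_heads=2, num_splits=3, hidden_size=4
# >>> fix_query_key_value_ordering(w, 2.0, 3, 2, 4).shape
# torch.Size([24, 7])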
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096,
            activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
            layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True,
            summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1,
            scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
#################################################################################################### | 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 0 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    """simple docstring"""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    """simple docstring"""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    """Returns everything needed to perform basic training."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    """simple docstring"""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """simple docstring"""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    """simple docstring"""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    """simple docstring"""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print('''**Test `accumulate` gradient accumulation with dataloader break**''')
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print('''**Test NOOP `no_sync` context manager**''')
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print('''**Test Distributed `no_sync` context manager**''')
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation, ''',
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version('''<''', '''2.0''') or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''',
                '''`split_batches=False`, `dispatch_batches=False`**''',
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''',
                        f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 167 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
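# Sketch of how the helpers compose when planning jobs: count the shards announced by
# the gen_kwargs, then hand each job a contiguous range of shard indices.
# >>> _distribute_shards(num_shards=_number_of_shards_in_gen_kwargs({"shards": [0, 1, 2, 3]}), max_num_jobs=2)
# [range(0, 2), range(2, 4)]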
| 335 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False,
                 adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None,
                 **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
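# --- Illustrative sketch, not part of the original file ---
# Hypothetical construction of the config defined above.
def _example_xmod_config():
    config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
    assert config.model_type == "xmod"
    assert config.languages == ["en_XX", "de_DE"]  # the tuple is normalized to a list
    return config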
| 158 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
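# --- Illustrative sketch, not part of the original module ---
# Hedged usage of the helpers above; both calls rely only on installed metadata.
def _example_version_checks():
    at_least_1_10 = is_torch_version(">=", "1.10")               # compare the installed torch
    packaging_ok = compare_versions("packaging", ">=", "20.0")   # compare any installed library
    return at_least_1_10, packaging_ok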
| 335 | 0 |
import math
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
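# --- Illustrative sketch, not part of the original solution ---
# Worked example of the 6k +/- 1 trial division in is_prime: for 91, only
# i = 5 (and i + 2 = 7) are tried, and 91 % 7 == 0, so 91 is composite.
def _example_is_prime():
    assert is_prime(97) is True
    assert is_prime(91) is False  # 91 = 7 * 13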
| 216 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(self, predictions, references, char_order: int = CHRF.CHAR_ORDER, word_order: int = CHRF.WORD_ORDER, beta: int = CHRF.BETA, lowercase: bool = False, whitespace: bool = False, eps_smoothing: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
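# --- Illustrative sketch, not part of the original metric ---
# Hypothetical end-to-end usage mirroring the docstring examples above;
# assumes this metric is registered under the name "chrf".
def _example_chrf_usage():
    chrf = datasets.load_metric("chrf")
    predictions = ["The cat sat on the mat."]
    references = [["The cat sat on the mat."]]
    return chrf.compute(predictions=predictions, references=references, word_order=2)  # chrF++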
| 335 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'''the value at {value} is {summ}''')
if __name__ == "__main__":
main()
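# --- Illustrative sketch, not part of the original script ---
# Non-interactive worked example of the same scheme: interpolating f(x) = x**2
# at x = 2.5 from equally spaced samples recovers 6.25 exactly (f is quadratic).
def _example_forward_interpolation() -> float:
    x = [0, 1, 2, 3]
    y = [[0.0] * 4 for _ in range(4)]
    for i, xi in enumerate(x):
        y[i][0] = float(xi * xi)  # samples: 0, 1, 4, 9
    for i in range(1, 4):  # forward difference table
        for j in range(4 - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    value = 2.5
    u = (value - x[0]) / (x[1] - x[0])
    summ = y[0][0]
    for i in range(1, 4):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    return summ  # 6.25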
| 155 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
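# --- Illustrative sketch, not part of the original trainer ---
# Hypothetical wiring, assuming `model`, `training_args`, tokenized datasets,
# a `post_processing_function` and a `compute_metrics` callable already exist.
#
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     train_dataset=train_dataset,
#     eval_dataset=eval_dataset,
#     eval_examples=eval_examples,
#     post_process_function=post_processing_function,
#     compute_metrics=compute_metrics,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)  # forwarded as gen_kwargs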
| 335 | 0 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 213 |
"""simple docstring"""
class Graph:
    def __init__(self):
        # dictionary of lists: vertex -> adjacent vertices
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
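# --- Illustrative sketch, not part of the original module ---
# A conventional neighbour-following DFS with an explicit stack (note the
# recursive version above walks every vertex index instead of just neighbours).
def _example_iterative_dfs(graph, start=0):
    visited, stack, order = set(), [start], []
    while stack:
        v = stack.pop()
        if v in visited:
            continue
        visited.add(v)
        order.append(v)
        # push neighbours in reverse so they pop in their stored order
        stack.extend(reversed(graph.vertex.get(v, [])))
    return order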
| 335 | 0 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic() -> None:
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_ra = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
assert (
pd.DataFrame(no_aggregation['rouge2'] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['rouge2'] ).fmeasure.mean()
)
def test_newline_cnn_improvement() -> None:
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics() -> None:
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep() -> None:
    pred = [
'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
'Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .',
]
    tgt = [
'Margot Frank, died in 1945, a month earlier than previously thought.',
'Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of'
' the final seconds on board Flight 9525.',
]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline() -> None:
    pred = [
'\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" '
]
    tgt = [
' Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli() -> None:
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 183 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
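# --- Illustrative sketch, not part of the original solution ---
# Why three-argument pow is used above: it performs modular exponentiation,
# so only the last n digits are ever materialized.
def _example_modular_pow():
    assert pow(2, 100, 10**5) == (2**100) % 10**5  # identical result, far cheaper for huge exponents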
| 335 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')

        image_processor_map = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 32, 'width': 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'test'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(' ', '') for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 226 |
"""simple docstring"""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
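# --- Illustrative sketch, not part of the original module ---
# Three points are collinear exactly when the cross product of AB and AC vanishes.
def _example_collinearity():
    assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) is True   # cross product is the zero vector
    assert are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)) is False  # cross product is (0, 0, 1)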
| 335 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}
    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500,
                 num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5,
                 r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67,
                 task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True,
                 visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 15 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
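# --- Illustrative note, not part of the original module ---
# With the _LazyModule pattern above, importing the package stays cheap; the
# torch-backed modeling code is only imported on first attribute access, e.g.
# (hypothetical session):
#
# from transformers.models.pegasus_x import PegasusXConfig  # loads configuration only
# from transformers.models.pegasus_x import PegasusXModel   # now triggers the heavy import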
| 335 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll",
                 input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0,
                 num_static_real_features=0, num_static_categorical_features=0, num_time_features=0,
                 cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32,
                 encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2,
                 is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1,
                 decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100,
                 init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
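# --- Illustrative sketch, not part of the original file ---
# feature_size bookkeeping under the defaults reconstructed above: input_size=1
# with the seven default lags and no extra features gives 1 * 7 + (1 * 2) = 9.
def _example_informer_feature_size():
    config = InformerConfig()
    assert config.feature_size == 9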
| 227 |
"""simple docstring"""
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
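# --- Illustrative sketch, not part of the original module ---
# After the k-th pass of the triple loop, dp[i][j] holds the shortest i -> j
# distance using only intermediate nodes {0..k}. A tiny self-check:
def _example_floyd_warshall():
    g = Graph(3)
    g.add_edge(0, 1, 5)
    g.add_edge(1, 2, 3)
    g.add_edge(0, 2, 100)
    g.floyd_warshall()
    assert g.show_min(0, 2) == 8  # 0 -> 1 -> 2 beats the direct edge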
| 335 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'shortest_edge': 30}
        crop_size = crop_size if crop_size is not None else {'height': 30, 'width': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
        return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize_and_center_crop'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'crop_pct'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 30})
        self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
| 90 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowercase__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
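# CLIP-style preprocessing: resize the shortest edge, center-crop, rescale to
# [0, 1], and normalize with the OpenAI CLIP mean/std imported above.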
class lowerCAmelCase__ ( _lowerCamelCase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : int = size if size is not None else {'shortest_edge': 224}
_lowerCamelCase : Optional[Any] = get_size_dict(lowercase , default_to_square=lowercase )
_lowerCamelCase : Optional[int] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : List[str] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size' )
_lowerCamelCase : Optional[Any] = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : Dict = resample
_lowerCamelCase : Optional[int] = do_center_crop
_lowerCamelCase : int = crop_size
_lowerCamelCase : Any = do_rescale
_lowerCamelCase : Optional[int] = rescale_factor
_lowerCamelCase : List[str] = do_normalize
_lowerCamelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCamelCase : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCamelCase : List[Any] = do_convert_rgb
def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ):
_lowerCamelCase : int = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_lowerCamelCase : Tuple = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
_lowerCamelCase : Tuple = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : Optional[Any] = size if size is not None else self.size
_lowerCamelCase : Any = get_size_dict(lowercase , param_name='size' , default_to_square=lowercase )
_lowerCamelCase : Union[str, Any] = resample if resample is not None else self.resample
_lowerCamelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Optional[Any] = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase )
_lowerCamelCase : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Dict = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Any = image_std if image_std is not None else self.image_std
_lowerCamelCase : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCamelCase : List[str] = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
_lowerCamelCase : str = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_lowerCamelCase : Dict = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
_lowerCamelCase : Tuple = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : List[str] = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_lowerCamelCase : Optional[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_lowerCamelCase : int = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Tuple = {'pixel_values': images}
        return BatchFeature(data=lowercase , tensor_type=lowercase )
| 96 |
"""simple docstring"""
import math
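# Malus's law: transmitted intensity through an analyzer is I = I0 * cos^2(theta),
# where theta is the angle between the polarization and the analyzer axis.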
def _snake_case ( initial_intensity : float , angle : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 335 | 0 |
"""simple docstring"""
import math
def lowercase_ ( initial_intensity , angle ):
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('''The value of intensity cannot be negative''' )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''' )
# handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 167 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
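# Fast pipeline test: builds an Alt Diffusion img2img pipeline from tiny,
# randomly initialized UNet/VAE/text-encoder components and dummy images.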
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 335 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
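# Slow Flax tests replicate the pipeline parameters and shard the prompts
# across all available devices, driving sampling with split PRNG keys.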
@slow
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
def _snake_case ( self ) -> Tuple:
super().tearDown()
gc.collect()
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
_lowerCAmelCase = "A painting of a squirrel eating a burger"
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = sd_pipe.prepare_inputs(_lowerCAmelCase )
_lowerCAmelCase = replicate(_lowerCAmelCase )
_lowerCAmelCase = shard(_lowerCAmelCase )
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = jax.random.split(_lowerCAmelCase , jax.device_count() )
_lowerCAmelCase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase = images[0, 253:256, 253:256, -1]
_lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = "stabilityai/stable-diffusion-2"
_lowerCAmelCase , _lowerCAmelCase = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowerCAmelCase , subfolder="scheduler" )
_lowerCAmelCase , _lowerCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
_lowerCAmelCase , scheduler=_lowerCAmelCase , revision="bf16" , dtype=jnp.bfloataa , )
_lowerCAmelCase = scheduler_params
_lowerCAmelCase = "A painting of a squirrel eating a burger"
_lowerCAmelCase = jax.device_count()
_lowerCAmelCase = num_samples * [prompt]
_lowerCAmelCase = sd_pipe.prepare_inputs(_lowerCAmelCase )
_lowerCAmelCase = replicate(_lowerCAmelCase )
_lowerCAmelCase = shard(_lowerCAmelCase )
_lowerCAmelCase = jax.random.PRNGKey(0 )
_lowerCAmelCase = jax.random.split(_lowerCAmelCase , jax.device_count() )
_lowerCAmelCase = sd_pipe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , num_inference_steps=25 , jit=_lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_lowerCAmelCase = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_lowerCAmelCase = images[0, 253:256, 253:256, -1]
_lowerCAmelCase = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_lowerCAmelCase = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 158 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
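# FileLock behavior under test: acquiring an already-held lock should raise
# Timeout, and overly long lock filenames should be truncated to OS limits.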
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
| 335 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
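# Exercises the Kandinsky inpainting pipeline with tiny randomly initialized
# components (multilingual text encoder, UNet, and VQ "movq" decoder).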
class UpperCamelCase__ ( _lowerCamelCase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : int = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE : Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
_SCREAMING_SNAKE_CASE : Any = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_SCREAMING_SNAKE_CASE : Optional[Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_SCREAMING_SNAKE_CASE : Tuple = False
@property
def lowerCAmelCase (self : Optional[Any] ):
return 3_2
@property
def lowerCAmelCase (self : List[Any] ):
return 3_2
@property
def lowerCAmelCase (self : Optional[int] ):
return self.time_input_dim
@property
def lowerCAmelCase (self : List[str] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase (self : Any ):
return 1_0_0
@property
def lowerCAmelCase (self : List[str] ):
__a : int = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowerCAmelCase (self : str ):
torch.manual_seed(0 )
__a : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
__a : Union[str, Any] = MultilingualCLIP(snake_case_ )
__a : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase (self : Dict ):
torch.manual_seed(0 )
__a : Dict = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__a : List[Any] = UNetaDConditionModel(**snake_case_ )
return model
@property
def lowerCAmelCase (self : List[Any] ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase (self : Optional[Any] ):
torch.manual_seed(0 )
__a : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase (self : int ):
__a : List[Any] = self.dummy_text_encoder
__a : int = self.dummy_tokenizer
__a : Union[str, Any] = self.dummy_unet
__a : List[Any] = self.dummy_movq
__a : List[str] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case_ , )
__a : List[str] = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase (self : Optional[int] , snake_case_ : Tuple , snake_case_ : int=0 ):
__a : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__a : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
# create init_image
__a : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
__a : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : str = Image.fromarray(np.uinta(snake_case_ ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
__a : Any = np.ones((6_4, 6_4) , dtype=np.floataa )
__a : Tuple = 0
if str(snake_case_ ).startswith('''mps''' ):
__a : str = torch.manual_seed(snake_case_ )
else:
__a : Tuple = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
__a : Tuple = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase (self : Optional[Any] ):
__a : Union[str, Any] = '''cpu'''
__a : Tuple = self.get_dummy_components()
__a : str = self.pipeline_class(**snake_case_ )
__a : Tuple = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
__a : List[str] = pipe(**self.get_dummy_inputs(snake_case_ ) )
__a : Optional[Any] = output.images
__a : Union[str, Any] = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
__a : int = image[0, -3:, -3:, -1]
__a : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(f"image.shape {image.shape}" )
assert image.shape == (1, 6_4, 6_4, 3)
__a : List[Any] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def lowerCAmelCase (self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
def lowerCAmelCase (self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase (self : Optional[int] ):
__a : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
__a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__a : Any = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
__a : Tuple = 0
__a : Union[str, Any] = '''a hat'''
__a : List[str] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
__a : int = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
__a : Union[str, Any] = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
__a : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
__a , __a : Optional[int] = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__a : str = pipeline(
snake_case_ , image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
__a : Union[str, Any] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 216 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
def __init__( self: List[str] , *UpperCamelCase: Dict , **UpperCamelCase: List[str] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCamelCase , **UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: "Image" , UpperCamelCase: str ):
"""simple docstring"""
return self.pre_processor(UpperCamelCase , UpperCamelCase , return_tensors="""pt""" )
def UpperCamelCase ( self: str , UpperCamelCase: str ):
"""simple docstring"""
with torch.no_grad():
return self.model(**UpperCamelCase ).logits
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: int ):
"""simple docstring"""
A__ = outputs.argmax(-1 ).item()
return self.model.config.idalabel[idx]
| 335 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
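# Lazy import structure: torch-backed symbols are only resolved at type-check
# time or on first attribute access through the _LazyModule proxy below.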
a = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
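# YOLOS resizing scales the shortest edge to `shortest_edge` while preserving
# aspect ratio, so the tester recomputes the expected height/width per image.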
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__SCREAMING_SNAKE_CASE ={'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE =[
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 213 |
"""simple docstring"""
def compute_ap( l ): # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 335 | 0 |
"""simple docstring"""
def solution( n : int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(10) = }''')
| 183 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
| 335 | 0 |
import argparse
import struct
import unittest
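# Pure-Python SHA-256 (FIPS 180-4): pad the message, split it into 512-bit
# blocks, expand each to a 64-word schedule, and run 64 compression rounds.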
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Dict , a_ : bytes ):
'''simple docstring'''
__UpperCAmelCase : Dict = data
# Initialize hash values
__UpperCAmelCase : Optional[Any] = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
# Initialize round constants
__UpperCAmelCase : Union[str, Any] = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
__UpperCAmelCase : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def snake_case__ ( a_ : bytes ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = b'''\x80''' + (b'''\x00''' * (63 - (len(a_ ) + 8) % 64))
__UpperCAmelCase : Optional[int] = struct.pack('''>Q''' , (len(a_ ) * 8) )
return data + padding + big_endian_integer
def snake_case__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__UpperCAmelCase : Optional[Any] = list(struct.unpack('''>16L''' , a_ ) )
# add 48 0-ed integers
words += [0] * 48
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__UpperCAmelCase : List[str] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__UpperCAmelCase : Tuple = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__UpperCAmelCase : Union[str, Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x1_00_00_00_00
# Compression
__UpperCAmelCase : List[str] = self.ror(a_ , 6 ) ^ self.ror(a_ , 11 ) ^ self.ror(a_ , 25 )
__UpperCAmelCase : Dict = (e & f) ^ ((~e & 0xff_ff_ff_ff) & g)
__UpperCAmelCase : Dict = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x1_00_00_00_00
__UpperCAmelCase : str = self.ror(a_ , 2 ) ^ self.ror(a_ , 13 ) ^ self.ror(a_ , 22 )
__UpperCAmelCase : Optional[Any] = (a & b) ^ (a & c) ^ (b & c)
__UpperCAmelCase : List[Any] = (sa + maj) % 0x1_00_00_00_00
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = (
g,
f,
e,
((d + tempa) % 0x1_00_00_00_00),
c,
b,
a,
((tempa + tempa) % 0x1_00_00_00_00),
)
__UpperCAmelCase : str = [a, b, c, d, e, f, g, h]
# Modify final values
__UpperCAmelCase : List[str] = [
((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
for index, element in enumerate(self.hashes )
]
__UpperCAmelCase : Union[str, Any] = ''''''.join([hex(a_ )[2:].zfill(8 ) for value in self.hashes] )
def snake_case__ ( self : Any , a_ : int , a_ : int ):
'''simple docstring'''
return 0xff_ff_ff_ff & (value << (32 - rotations)) | (value >> rotations)
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self : Any ):
'''simple docstring'''
import hashlib
__UpperCAmelCase : Union[str, Any] = bytes('''Test String''' , '''utf-8''' )
self.assertEqual(SHAaaa(a_ ).hash , hashlib.shaaaa(a_ ).hexdigest() )
def a ( ):
'''simple docstring'''
import doctest
doctest.testmod()
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument(
'''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
__UpperCAmelCase : Optional[int] = parser.parse_args()
__UpperCAmelCase : Dict = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
__UpperCAmelCase : str = f.read()
else:
__UpperCAmelCase : Any = bytes(UpperCAmelCase_ , '''utf-8''' )
print(SHAaaa(UpperCAmelCase_ ).hash )
if __name__ == "__main__":
main()
| 226 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
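# DeeRoBERTa attaches "highway" early-exit classifiers to intermediate layers;
# at inference, low-entropy highway logits allow exiting before the top layer.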
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 335 | 0 |
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top-left to the bottom-right corner of
    the matrix stored in `filename`, moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
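
# For intuition, the same recurrence on a tiny in-memory grid (a hypothetical
# helper for illustration, not part of the original solution):
def _min_path_sum(grid: list[list[int]]) -> int:
    n = len(grid)
    dp = [row[:] for row in grid]  # dp[i][j]: cheapest cost of reaching (i, j)
    for i in range(n):
        for j in range(n):
            if i == 0 and j == 0:
                continue
            from_top = dp[i - 1][j] if i > 0 else float("inf")
            from_left = dp[i][j - 1] if j > 0 else float("inf")
            dp[i][j] += min(from_top, from_left)
    return dp[-1][-1]


assert _min_path_sum([[131, 673], [201, 96]]) == 428  # 131 -> 201 -> 96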
if __name__ == "__main__":
print(f'''{solution() = }''')
| 15 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
| 335 | 0 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 227 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'MobileNetV1Config'

# Base docstring
_CHECKPOINT_FOR_DOC = 'google/mobilenet_v1_1.0_224'
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'google/mobilenet_v1_1.0_224'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'google/mobilenet_v1_1.0_224',
    'google/mobilenet_v1_0.75_192',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
MOBILENET_V1_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`MobileNetV1ImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    def __init__(self, config: MobileNetVaConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
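
# Illustrative inference (a sketch; reuses the checkpoint documented above):
#
#   from transformers import AutoImageProcessor
#   image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = image_processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   predicted_label = model.config.id2label[logits.argmax(-1).item()]  # e.g. "tabby, tabby cat"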
| 335 | 0 |
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of `length` unit squares can be filled
    with unit-length gaps and blocks of length 2, 3 and 4.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
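
# Spot-check against hand-counted values for short rows (illustrative only,
# not part of the original solution):
assert [solution(n) for n in range(5)] == [1, 1, 2, 4, 8]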
if __name__ == "__main__":
print(f'''{solution() = }''')
| 90 |
"""simple docstring"""
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Count the simple paths from the top-left to the bottom-right cell of `grid`,
    moving in the four cardinal directions without revisiting a cell; cells
    containing 1 are blocked.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
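
# Illustrative call (not part of the original module): an open 2x2 grid has
# exactly two simple paths from the top-left to the bottom-right corner.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2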
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
lowercase__ = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
lowercase__ = {
'ctrl': 256,
}
lowercase__ = {
'Pregnancy': 16_8629,
'Christianity': 7675,
'Explain': 10_6423,
'Fitness': 6_3440,
'Saving': 6_3163,
'Ask': 2_7171,
'Ass': 9_5985,
'Joke': 16_3509,
'Questions': 4_5622,
'Thoughts': 4_9605,
'Retail': 5_2342,
'Feminism': 16_4338,
'Writing': 1_1992,
'Atheism': 19_2263,
'Netflix': 4_8616,
'Computing': 3_9639,
'Opinion': 4_3213,
'Alone': 4_4967,
'Funny': 5_8917,
'Gaming': 4_0358,
'Human': 4088,
'India': 1331,
'Joker': 7_7138,
'Diet': 3_6206,
'Legal': 1_1859,
'Norman': 4939,
'Tip': 7_2689,
'Weight': 5_2343,
'Movies': 4_6273,
'Running': 2_3425,
'Science': 2090,
'Horror': 3_7793,
'Confession': 6_0572,
'Finance': 1_2250,
'Politics': 1_6360,
'Scary': 19_1985,
'Support': 1_2654,
'Technologies': 3_2516,
'Teenage': 6_6160,
'Event': 3_2769,
'Learned': 6_7460,
'Notion': 18_2770,
'Wikipedia': 3_7583,
'Books': 6665,
'Extract': 7_6050,
'Confessions': 10_2701,
'Conspiracy': 7_5932,
'Links': 6_3674,
'Narcissus': 15_0425,
'Relationship': 5_4766,
'Relationships': 13_4796,
'Reviews': 4_1671,
'News': 4256,
'Translation': 2_6820,
'multilingual': 12_8406,
}
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is
    represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
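
# For intuition (illustrative only): with the word represented as a tuple of
# symbols, get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.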
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair Encoding.

    Args:
        vocab_file (`str`): Path to the vocabulary file.
        merges_file (`str`): Path to the merges file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token='<unk>', **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []

        words = re.findall(r'\S+\n?', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far) | 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_megatron_bert'] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
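
# Note (illustrative, not part of the original module): with `_LazyModule`,
# importing this package stays cheap; torch-backed symbols such as
# `MegatronBertModel` are only materialized on first attribute access.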
| 335 | 0 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compare a library version (or an already-parsed `Version`) against a
    requirement, using a string operation such as ">=" or "<".
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version to a reference version."""
    return compare_versions(torch_version, operation, version)
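
# Illustrative usage (a sketch):
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # take a code path that requires torch >= 1.12
#   if compare_versions("numpy", "<", "2.0.0"):
#       ...  # guard against numpy 2.x API changes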
| 167 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
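
# For intuition, a minimal reimplementation of the contract exercised above
# (a sketch only; not the datasets-internal implementation):
def _toy_distribute_shards(num_shards: int, max_num_jobs: int) -> list:
    shards_per_job = [num_shards // max_num_jobs] * max_num_jobs
    for i in range(num_shards % max_num_jobs):
        shards_per_job[i] += 1  # spread the remainder over the first jobs
    out, start = [], 0
    for count in shards_per_job:
        if count:
            out.append(range(start, start + count))
        start += count
    return out


assert _toy_distribute_shards(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]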
| 335 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
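
# Sketch of how this trainer is typically wired up in a run_seq2seq_qa-style
# script (the names below are illustrative placeholders):
#
#   trainer = QuestionAnsweringSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=max_answer_length, num_beams=num_beams)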
| 158 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    return compare_versions(torch_version, operation, version)
| 335 | 0 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` matches the (misspelled) keyword of the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the artifacts' content of the last completed daily CI run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
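
# Illustrative driver (a sketch; the artifact name and env var are placeholders):
#
#   token = os.environ.get("GITHUB_TOKEN")
#   results = get_last_daily_ci_reports(
#       artifact_names=["single-gpu_run_models_gpu"], output_dir="ci_artifacts", token=token
#   )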
| 216 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n    title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n    author = "Popovi{\'c}, Maja",\n    booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n    month = sep,\n    year = "2015",\n    address = "Lisbon, Portugal",\n    publisher = "Association for Computational Linguistics",\n    url = "https://aclanthology.org/W15-3049",\n    doi = "10.18653/v1/W15-3049",\n    pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n    title = "chr{F}++: words helping character n-grams",\n    author = "Popovi{\'c}, Maja",\n    booktitle = "Proceedings of the Second Conference on Machine Translation",\n    month = sep,\n    year = "2017",\n    address = "Copenhagen, Denmark",\n    publisher = "Association for Computational Linguistics",\n    url = "https://aclanthology.org/W17-4770",\n    doi = "10.18653/v1/W17-4770",\n    pages = "612--618",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n    predictions (list of str): The predicted sentences.\n    references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n    char_order (int): Character n-gram order. Defaults to `6`.\n    word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n    beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n    lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n    whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n    eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n    to reference chrF++.py, NLTK and Moses implementations. If `False`,\n    it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n    \'score\' (float): The chrF (chrF++) score,\n    \'char_order\' (int): The character n-gram order,\n    \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n    \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n    Example 1--a simple example of calculating chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction, references=reference)\n        >>> print(results)\n        {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n    Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2)\n        >>> print(results)\n        {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n    Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n        >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n        >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n        >>> chrf = datasets.load_metric("chrf")\n        >>> results = chrf.compute(predictions=prediction,\n        ...                         references=reference,\n        ...                         word_order=2,\n        ...                         lowercase=True)\n        >>> print(results)\n        {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 335 | 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a = logging.get_logger(__name__)
a = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
a = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
a = {
'abeja/gpt-neox-japanese-2.7b': 2_0_4_8,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, hence raw_vocab is used here
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
def __init__( self , vocab , ids_to_tokens , emoji ):
    self.vocab = vocab  # same as swe
    self.ids_to_tokens = ids_to_tokens  # same as bpe
    self.emoji = emoji
    self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
    self.content_repatter1 = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
    self.content_repatter2 = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
    self.content_repatter3 = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
    self.content_repatter4 = re.compile(
        R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
    self.content_repatter5 = re.compile(
        R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
    self.content_repatter6 = re.compile(
        R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
    keisen = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
    blocks = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
    self.content_trans1 = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : Optional[Any] ):
return len(self.ids_to_tokens )
def clean_text( self , content ):
    content = self.content_repatter1.sub("""<URL>""" , content )
    content = self.content_repatter2.sub("""<EMAIL>""" , content )
    content = self.content_repatter3.sub("""<TEL>""" , content )
    content = self.content_repatter4.sub("""<DATE>""" , content )
    content = self.content_repatter5.sub("""<DATE>""" , content )
    content = self.content_repatter6.sub("""<PRICE>""" , content )
    content = content.translate(self.content_trans1 )
    while "<BLOCK><BLOCK>" in content:
        content = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
    return content
def tokenize( self , text , clean=False ):
    text = text.replace(""" """ , """<SP>""" )
    text = text.replace("""\u3000""" , """<SP>""" )  # full-width space
    text = text.replace("""\r\n""" , """<BR>""" )
    text = text.replace("""\n""" , """<BR>""" )
    text = text.replace("""\r""" , """<BR>""" )
    text = text.replace("""\t""" , """<TAB>""" )
    text = text.replace("""—""" , """ー""" )
    text = text.replace("""−""" , """ー""" )
    for k, v in self.emoji["emoji"].items():
        if k in text:
            text = text.replace(k , v )
    if clean:
        text = self.clean_text(text )
    def check_simbol(x: str ):
        e = x.encode()
        if len(x ) == 1 and len(e ) == 2:
            c = (int(e[0] ) << 8) + int(e[1] )
            if (
                (c >= 0Xc2_a1 and c <= 0Xc2_bf)
                or (c >= 0Xc7_80 and c <= 0Xc7_83)
                or (c >= 0Xca_b9 and c <= 0Xcb_bf)
                or (c >= 0Xcc_80 and c <= 0Xcd_a2)
            ):
                return True
        return False
    def checkuae(x: str ):
        e = x.encode()
        if len(x ) == 1 and len(e ) == 3:
            c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
            if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f:
                return True
        return False
    pos = 0
    result = []
    while pos < len(text ):
        end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
        candidates = []  # (token_id, token, pos)
        for e in range(end , pos , -1 ):
            wd = text[pos:e]
            if wd in self.vocab:
                if wd[0] == "<" and len(wd ) > 2:
                    candidates = [(self.vocab[wd], wd, e)]
                    break
                else:
                    candidates.append((self.vocab[wd], wd, e) )
        if len(candidates ) > 0:
            # the smallest token_id is adopted
            _, wd, e = sorted(candidates , key=lambda x : x[0] )[0]
            result.append(wd )
            pos = e
        else:
            end = pos + 1
            wd = text[pos:end]
            if check_simbol(wd ):
                result.append("""<KIGOU>""" )
            elif checkuae(wd ):
                result.append("""<U2000U2BFF>""" )
            else:
                for i in wd.encode("""utf-8""" ):
                    result.append("""<|byte%d|>""" % i )
            pos = end
    return result
def convert_id_to_token( self , index , breakline="\n" ):
    words = []
    byte_tokens = []
    word = self.ids_to_tokens[index][0]
    if word[:6] == "<|byte" and word[-2:] == "|>":
        byte_tokens.append(int(word[6:-2] ) )
    else:
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
            byte_tokens = []
        if word[:7] == "<|emoji" and word[-2:] == "|>":
            words.append(self.emoji["""emoji_inv"""][word] )
        elif word == "<SP>":
            words.append(""" """ )
        elif word == "<BR>":
            words.append(breakline )
        elif word == "<TAB>":
            words.append("""\t""" )
        elif word == "<BLOCK>":
            words.append("""▀""" )
        elif word == "<KIGOU>":
            words.append("""ǀ""" )
        elif word == "<U2000U2BFF>":
            words.append("""‖""" )
        else:
            words.append(word )
    if len(byte_tokens ) > 0:
        words.append(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )
    text = """""".join(words )
    return text
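if __name__ == "__main__":
    # Minimal sketch of the byte-fallback convention used by `tokenize` and
    # `convert_id_to_token` above: characters that miss the vocab are emitted
    # as "<|byteN|>" tokens and reassembled from raw UTF-8 bytes on decode.
    tokens = ["""<|byte%d|>""" % i for i in """猫""".encode("""utf-8""" )]
    print(tokens )  # ['<|byte231|>', '<|byte140|>', '<|byte171|>']
    byte_tokens = [int(t[6:-2] ) for t in tokens]
    print(bytearray(byte_tokens ).decode("""utf-8""" , errors="""replace""" ) )  # 猫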
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( Seq2SeqTrainer ):
"""simple docstring"""
def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ):
    """simple docstring"""
    super().__init__(*args , **kwargs )
    self.eval_examples = eval_examples
    self.post_process_function = post_process_function
def evaluate( self , eval_dataset: Optional[Dataset] = None , eval_examples=None , ignore_keys: Optional[List[str]] = None , metric_key_prefix: str = "eval" , **gen_kwargs , ):
"""simple docstring"""
gen_kwargs = gen_kwargs.copy()
gen_kwargs["""max_length"""] = (
    gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
gen_kwargs["""num_beams"""] = (
    gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def predict( self , predict_dataset , predict_examples , ignore_keys=None , metric_key_prefix: str = "test" , **gen_kwargs ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
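if __name__ == "__main__":
    # Illustration of the `speed_metrics` bookkeeping shared by `evaluate` and
    # `predict` above, with made-up numbers (100 samples, total batch size 8).
    start_time = time.time()
    time.sleep(0.1 )  # stand-in for the evaluation loop
    num_samples, total_batch_size = 1_00, 8
    demo_metrics = speed_metrics(
        """eval""" , start_time , num_samples=num_samples , num_steps=math.ceil(num_samples / total_batch_size ) )
    print(sorted(demo_metrics ) )  # ['eval_runtime', 'eval_samples_per_second', 'eval_steps_per_second']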
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
tokenizer = ByT5Tokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def ta_base_tokenizer( self ):
    '''simple docstring'''
    return ByT5Tokenizer.from_pretrained('google/byt5-small' )
def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Any:
'''simple docstring'''
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**__UpperCamelCase )
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=20 ,__UpperCamelCase=5 ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[int] = []
for i in range(len(__UpperCamelCase ) ):
try:
lowercase_ : Any = tokenizer.decode([i] ,clean_up_tokenization_spaces=__UpperCamelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowercase_ : Union[str, Any] = list(filter(lambda __UpperCamelCase : re.match(r'^[ a-zA-Z]+$' ,t[1] ) ,__UpperCamelCase ) )
lowercase_ : Optional[Any] = list(filter(lambda __UpperCamelCase : [t[0]] == tokenizer.encode(t[1] ,add_special_tokens=__UpperCamelCase ) ,__UpperCamelCase ) )
if max_length is not None and len(__UpperCamelCase ) > max_length:
lowercase_ : List[str] = toks[:max_length]
if min_length is not None and len(__UpperCamelCase ) < min_length and len(__UpperCamelCase ) > 0:
while len(__UpperCamelCase ) < min_length:
lowercase_ : Optional[Any] = toks + toks
# toks_str = [t[1] for t in toks]
lowercase_ : List[str] = [t[0] for t in toks]
# Ensure consistency
lowercase_ : Dict = tokenizer.decode(__UpperCamelCase ,clean_up_tokenization_spaces=__UpperCamelCase )
if " " not in output_txt and len(__UpperCamelCase ) > 1:
lowercase_ : Optional[Any] = (
tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=__UpperCamelCase )
+ ' '
+ tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=__UpperCamelCase )
)
if with_prefix_space:
lowercase_ : Tuple = ' ' + output_txt
lowercase_ : List[str] = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
return output_txt, output_ids
def _UpperCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ : List[Any] = self.ta_base_tokenizer
lowercase_ : int = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
lowercase_ : Tuple = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] ,batch_without_eos_added['input_ids'] )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Optional[Any] = self.ta_base_tokenizer
lowercase_ : Dict = 'Unicode €.'
lowercase_ : List[str] = tokenizer(__UpperCamelCase )
lowercase_ : List[str] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] ,__UpperCamelCase )
# decoding
lowercase_ : Union[str, Any] = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,'Unicode €.</s>' )
lowercase_ : int = tokenizer('e è é ê ë' )
lowercase_ : Optional[int] = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] ,__UpperCamelCase )
# decoding
lowercase_ : Optional[Any] = tokenizer.decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) ,'e è é ê ë</s>' )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = self.ta_base_tokenizer
lowercase_ : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
lowercase_ : List[str] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
lowercase_ : List[str] = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors=__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
if FRAMEWORK != "jax":
lowercase_ : Tuple = list(batch.input_ids.numpy()[0] )
else:
lowercase_ : Optional[int] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertEqual((2, 37) ,batch.input_ids.shape )
self.assertEqual((2, 37) ,batch.attention_mask.shape )
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Optional[int] = self.ta_base_tokenizer
lowercase_ : Optional[int] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
lowercase_ : str = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,return_tensors=__UpperCamelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' ,__UpperCamelCase )
self.assertIn('attention_mask' ,__UpperCamelCase )
self.assertNotIn('decoder_input_ids' ,__UpperCamelCase )
self.assertNotIn('decoder_attention_mask' ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : str = self.ta_base_tokenizer
lowercase_ : List[Any] = [
'Summary of the text.',
'Another summary.',
]
lowercase_ : str = tokenizer(
text_target=__UpperCamelCase ,max_length=32 ,padding='max_length' ,truncation=__UpperCamelCase ,return_tensors=__UpperCamelCase )
self.assertEqual(32 ,targets['input_ids'].shape[1] )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = self.ta_base_tokenizer
lowercase_ : Optional[Any] = ['A long paragraph for summarization. </s>']
lowercase_ : int = ['Summary of the text. </s>']
# fmt: off
lowercase_ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
lowercase_ : Optional[int] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
lowercase_ : Tuple = tokenizer(__UpperCamelCase ,text_target=__UpperCamelCase )
self.assertEqual(__UpperCamelCase ,batch['input_ids'][0] )
self.assertEqual(__UpperCamelCase ,batch['labels'][0] )
def _UpperCAmelCase ( self ) -> Dict:
'''simple docstring'''
lowercase_ : Optional[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
lowercase_ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase_ : Dict = tempfile.mkdtemp()
lowercase_ : List[str] = ' He is very happy, UNwant\u00E9d,running'
lowercase_ : Dict = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Tuple = tokenizer.__class__.from_pretrained(__UpperCamelCase )
lowercase_ : Optional[int] = after_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
shutil.rmtree(__UpperCamelCase )
lowercase_ : Dict = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
lowercase_ : int = tempfile.mkdtemp()
lowercase_ : Tuple = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
lowercase_ : Tuple = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
lowercase_ : str = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
tokenizer.save_pretrained(__UpperCamelCase )
lowercase_ : Union[str, Any] = tokenizer.__class__.from_pretrained(__UpperCamelCase )
lowercase_ : int = after_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertIn('new_additional_special_token' ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
lowercase_ : Optional[Any] = tokenizer.__class__.from_pretrained(__UpperCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,'special_tokens_map.json' ) ,encoding='utf-8' ) as json_file:
lowercase_ : Optional[Any] = json.load(__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,'tokenizer_config.json' ) ,encoding='utf-8' ) as json_file:
lowercase_ : str = json.load(__UpperCamelCase )
lowercase_ : List[Any] = [f'''<extra_id_{i}>''' for i in range(125 )]
lowercase_ : List[str] = added_tokens_extra_ids + [
'an_additional_special_token'
]
lowercase_ : List[Any] = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(__UpperCamelCase ,'special_tokens_map.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(__UpperCamelCase ,__UpperCamelCase )
with open(os.path.join(__UpperCamelCase ,'tokenizer_config.json' ) ,'w' ,encoding='utf-8' ) as outfile:
json.dump(__UpperCamelCase ,__UpperCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowercase_ : Dict = tokenizer_class.from_pretrained(
__UpperCamelCase ,)
self.assertIn(
'an_additional_special_token' ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) ,)
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowercase_ : Optional[int] = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' ,lstrip=__UpperCamelCase )]
lowercase_ : Optional[int] = tokenizer_class.from_pretrained(
__UpperCamelCase ,additional_special_tokens=__UpperCamelCase ,)
self.assertIn('a_new_additional_special_token' ,tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] ,tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) ,)
def _UpperCAmelCase ( self ) -> Any:
'''simple docstring'''
lowercase_ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__UpperCamelCase )
lowercase_ : Optional[Any] = tokenizer_class.from_pretrained(__UpperCamelCase )
self.assertTrue(tokenizer.decode([255] ) == '' )
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = self.get_tokenizers(fast=__UpperCamelCase ,do_lower_case=__UpperCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowercase_ : List[str] = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
lowercase_ : List[str] = tokenizer.convert_tokens_to_string(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : List[str] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
lowercase_ : List[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
lowercase_ : Optional[int] = 0
lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(
__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )
for attr in attributes_list:
setattr(__UpperCamelCase ,attr + '_id' ,__UpperCamelCase )
self.assertEqual(getattr(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(getattr(__UpperCamelCase ,attr + '_id' ) ,__UpperCamelCase )
setattr(__UpperCamelCase ,attr + '_id' ,__UpperCamelCase )
self.assertEqual(getattr(__UpperCamelCase ,__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(getattr(__UpperCamelCase ,attr + '_id' ) ,__UpperCamelCase )
setattr(__UpperCamelCase ,'additional_special_tokens_ids' ,[] )
self.assertListEqual(getattr(__UpperCamelCase ,'additional_special_tokens' ) ,[] )
self.assertListEqual(getattr(__UpperCamelCase ,'additional_special_tokens_ids' ) ,[] )
setattr(__UpperCamelCase ,'additional_special_tokens_ids' ,[token_id_to_test_setters] )
self.assertListEqual(getattr(__UpperCamelCase ,'additional_special_tokens' ) ,[token_to_test_setters] )
self.assertListEqual(getattr(__UpperCamelCase ,'additional_special_tokens_ids' ) ,[token_id_to_test_setters] )
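# A minimal sketch of the id scheme the expected values in these tests assume:
# ByT5 reserves ids 0/1/2 for <pad>/</s>/<unk>, so each UTF-8 byte b becomes
# b + 3, followed by a trailing 1 for </s>.
def _byt5_like_encode(text ):
    return [b + 3 for b in text.encode('utf-8' )] + [1]
assert _byt5_like_encode('Unicode €.' ) == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]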
"""simple docstring"""
class Graph :
"""simple docstring"""
def __init__( self ):
    """simple docstring"""
    self.vertex = {}
def print_graph( self ):
    """simple docstring"""
    print(self.vertex )
    for i in self.vertex:
        print(i , """ -> """ , """ -> """.join([str(j ) for j in self.vertex[i]] ) )
def add_edge( self , from_vertex: int , to_vertex: int ):
    """simple docstring"""
    if from_vertex in self.vertex:
        self.vertex[from_vertex].append(to_vertex )
    else:
        # else make a new vertex
        self.vertex[from_vertex] = [to_vertex]
def dfs( self ):
    """simple docstring"""
    visited = [False] * len(self.vertex )
    # call the recursive helper function
    for i in range(len(self.vertex ) ):
        if not visited[i]:
            self.dfs_recursive(i , visited )
def dfs_recursive( self , start_vertex: int , visited: list ):
    """simple docstring"""
    visited[start_vertex] = True
    print(start_vertex , end=""" """ )
    # Recur for all the vertices that are adjacent to this node
    for i in self.vertex:
        if not visited[i]:
            self.dfs_recursive(i , visited )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
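# For comparison, a minimal iterative version of the same traversal; it mirrors
# the recursive rule above (visit the lowest-numbered unvisited vertex next).
def dfs_iterative(graph ):
    visited, order, stack = set(), [], [0]
    while stack:
        v = stack.pop()
        if v in visited:
            continue
        visited.add(v )
        order.append(v )
        stack.extend(sorted((u for u in graph if u not in visited) , reverse=True ) )
    return order
if __name__ == "__main__":
    print('Iterative DFS:' , dfs_iterative(g.vertex ) )  # [0, 1, 2, 3]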
"""simple docstring"""
import os
from pathlib import Path
def write_model_card( model_card_dir , src_lang , tgt_lang , model_name ):
    texts = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
scores = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
pair = F'''{src_lang}-{tgt_lang}'''
readme = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
model_card_dir.mkdir(parents=True , exist_ok=True )
path = os.path.join(model_card_dir , 'README.md' )
print(F'''Generating {path}''' )
with open(path , 'w' , encoding='utf-8' ) as f:
    f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / 'allenai' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
"""simple docstring"""
def solution( n: int = 10 ) -> str:
    if not isinstance(n , int ) or n < 0:
        raise ValueError("""Invalid input""" )
    modulus = 10**n
    number = 2_8433 * (pow(2 , 783_0457 , modulus )) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    '''simple docstring'''
    config = LxmertConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__A =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
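# Example invocation (paths are placeholders; save this module as e.g.
# convert_lxmert_tf_checkpoint.py first):
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin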
"""simple docstring"""
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector( end_point_a: Pointad , end_point_b: Pointad ) -> Vectorad:
    x = end_point_b[0] - end_point_a[0]
    y = end_point_b[1] - end_point_a[1]
    z = end_point_b[2] - end_point_a[2]
    return (x, y, z)
def get_ad_vectors_cross( ab: Vectorad , ac: Vectorad ) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1] # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
    z = ab[0] * ac[1] - ab[1] * ac[0] # *k
    return (x, y, z)
def is_zero_vector( vector: Vectorad , accuracy: int ) -> bool:
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def points_are_collinear_ad( point_a: Pointad , point_b: Pointad , point_c: Pointad , accuracy: int = 10 ) -> bool:
    ab = create_vector(point_a , point_b )
    ac = create_vector(point_a , point_c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
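if __name__ == "__main__":
    # Quick check: the first three points lie on the line x = y = z; the last
    # triple does not, so the cross product of AB and AC is non-zero.
    print(points_are_collinear_ad((0, 0, 0) , (1, 1, 1) , (2, 2, 2) ) )  # True
    print(points_are_collinear_ad((0, 0, 0) , (1, 1, 1) , (2, 2, 3) ) )  # False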
def sum_of_series( first_term , common_diff , num_of_terms ):
    """simple docstring"""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main() -> None:
    """simple docstring"""
    print(sum_of_series(1 , 1 , 1_0 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
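# Worked check of the closed form: with first_term=1, common_diff=1 and
# num_of_terms=10 the series is 1 + 2 + ... + 10, so (10 / 2) * (2 + 9) == 55.0.
assert sum_of_series(1 , 1 , 10 ) == 55.0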
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_pegasus_x'] = [
    'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
    'PegasusXForConditionalGeneration',
    'PegasusXModel',
    'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
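# With the lazy module installed in sys.modules, importing this package stays
# cheap: `PegasusXConfig` and the modeling classes are only resolved (and torch
# only pulled in for the latter) on first attribute access.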
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model( model , dirpath ):
    """simple docstring"""
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , "config.json" ) ) and os.path.isfile(
            os.path.join(dirpath , "config.json" ) ):
            os.remove(os.path.join(dirpath , "config.json" ) )
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin" ) ):
            os.remove(os.path.join(dirpath , "pytorch_model.bin" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy( p , unlogit=False ):
    """Compute the entropy of a distribution `p` along its last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
def print_ad_tensor( tensor ):
    """simple docstring"""
    logger.info("lv, h >\t" + "\t".join(f'''{x + 1}''' for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) )
        else:
            logger.info(f'''layer {row + 1}:\t''' + "\t".join(f'''{x:d}''' for x in tensor[row].cpu().data ) )
def compute_heads_importance( args , model , eval_dataloader , compute_entropy=True , compute_importance=True , head_mask=None , actually_pruned=False ):
"""simple docstring"""
a , a = model.config.num_hidden_layers, model.config.num_attention_heads
a = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
a = torch.zeros(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
if head_mask is None:
a = torch.ones(UpperCAmelCase_ , UpperCAmelCase_ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCAmelCase_ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
a = None
a = 0.0
a = 0.0
for step, inputs in enumerate(tqdm(UpperCAmelCase_ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
a = tuple(t.to(args.device ) for t in inputs )
((a ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
a = model(UpperCAmelCase_ , labels=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
a , a , a = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCAmelCase_ ):
a = entropy(attn.detach() , UpperCAmelCase_ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCAmelCase_ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
a = 2
a = torch.pow(torch.pow(UpperCAmelCase_ , UpperCAmelCase_ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-2_0
if not args.dont_normalize_global_importance:
a = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(UpperCAmelCase_ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(UpperCAmelCase_ )
logger.info("Head ranked by importance scores" )
a = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
a = torch.arange(
head_importance.numel() , device=args.device )
a = head_ranks.view_as(UpperCAmelCase_ )
print_ad_tensor(UpperCAmelCase_ )
return attn_entropy, head_importance, total_loss
def mask_heads( args , model , eval_dataloader ):
"""simple docstring"""
a , a , a = compute_heads_importance(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ )
a = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , UpperCAmelCase_ , original_score * args.masking_threshold )
a = torch.ones_like(UpperCAmelCase_ )
a = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
a = original_score
while current_score >= original_score * args.masking_threshold:
a = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
a = float("Inf" )
a = head_importance.view(-1 ).sort()[1]
if len(UpperCAmelCase_ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
a = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
a = new_head_mask.view(-1 )
a = 0.0
a = new_head_mask.view_as(UpperCAmelCase_ )
a = new_head_mask.clone().detach()
print_ad_tensor(UpperCAmelCase_ )
# Compute metric and head importance again
a , a , a = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
a = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , UpperCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(UpperCAmelCase_ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads( args , model , eval_dataloader , head_mask ):
"""simple docstring"""
a = datetime.now()
a , a , a = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , compute_importance=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
a = 1 / loss
a = datetime.now() - before_time
a = sum(p.numel() for p in model.parameters() )
a = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCAmelCase_ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a = [
v,
]
assert sum(len(UpperCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCAmelCase_ )
a = sum(p.numel() for p in model.parameters() )
a = datetime.now()
a , a , a = compute_heads_importance(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , compute_entropy=UpperCAmelCase_ , compute_importance=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , actually_pruned=UpperCAmelCase_ , )
a = 1 / loss
a = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , UpperCAmelCase_ , UpperCAmelCase_ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(UpperCAmelCase_ , args.output_dir )
def main():
"""simple docstring"""
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=UpperCAmelCase_ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=UpperCAmelCase_ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=UpperCAmelCase_ , type=UpperCAmelCase_ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=UpperCAmelCase_ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=UpperCAmelCase_ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=UpperCAmelCase_ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=UpperCAmelCase_ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=UpperCAmelCase_ , help="Batch size." )
parser.add_argument("--seed" , type=UpperCAmelCase_ , default=42 )
parser.add_argument("--local_rank" , type=UpperCAmelCase_ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=UpperCAmelCase_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=UpperCAmelCase_ , default="" , help="Can be used for distant debugging." )
a = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase_ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
a = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
a = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
a = torch.device("cuda" , args.local_rank )
a = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
a = nn.parallel.DistributedDataParallel(
UpperCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCAmelCase_ )
elif args.n_gpu > 1:
a = nn.DataParallel(UpperCAmelCase_ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCAmelCase_ )
torch.save(UpperCAmelCase_ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , UpperCAmelCase_ )
# Prepare dataset
a = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
a = (torch.from_numpy(UpperCAmelCase_ ),)
a = TensorDataset(*UpperCAmelCase_ )
a = RandomSampler(UpperCAmelCase_ )
a = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
a = mask_heads(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
prune_heads(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
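# Illustration of the entropy measure defined above: a uniform attention row
# maximises it while a one-hot row minimises it, e.g.
#   entropy(torch.tensor([[0.25, 0.25, 0.25, 0.25]]))  -> ~1.386 (= ln 4)
#   entropy(torch.tensor([[1.0, 0.0, 0.0, 0.0]]))      -> 0.0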
"""simple docstring"""
import math
class Graph :
"""simple docstring"""
def __init__( self , n=0 ): # a graph with Node 0,1,...,N-1
    """simple docstring"""
    self.n = n
    self.w = [
        [math.inf for j in range(0 , n )] for i in range(0 , n )
    ] # adjacency matrix for weight
    self.dp = [
        [math.inf for j in range(0 , n )] for i in range(0 , n )
    ] # dp[i][j] stores minimum distance from i to j
def add_edge( self , u , v , w ):
    """simple docstring"""
    self.dp[u][v] = w
def floyd_warshall( self ):
    """simple docstring"""
    for k in range(0 , self.n ):
        for i in range(0 , self.n ):
            for j in range(0 , self.n ):
                self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def show_min( self , u , v ):
    """simple docstring"""
    return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
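# The triple loop above runs in O(n^3) time with O(n^2) space, and it also
# handles negative edge weights correctly as long as there is no negative cycle.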
print(graph.show_min(1, 4))  # 11  (1 -> 3 -> 4: 5 + 6)
print(graph.show_min(0, 3))  # 16  (0 -> 2 -> 3: 9 + 7)
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__A = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__A = 'hopper-medium-v2'
__A = gym.make(env_name)
__A = ValueGuidedRLPipeline.from_pretrained(
"bglick13/hopper-medium-v2-value-function-hor32",
env=env,
)
env.seed(0)
obs = env.reset()
total_reward = 0
total_score = 0
T = 10_00
rollout = [obs.copy()]
try:
    for t in tqdm.tqdm(range(T)):
        # call the policy
        denorm_actions = pipeline(obs, planning_horizon=32)
        # execute action in environment
        next_observation, reward, terminal, _ = env.step(denorm_actions)
        score = env.get_normalized_score(total_reward)
        # update return
        total_reward += reward
        total_score += score
        print(
            f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
            f''' {total_score}'''
        )
        # save observations for rendering
        rollout.append(next_observation.copy())
        obs = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs) | 96 |
"""simple docstring"""
import math
def _snake_case ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ):
if initial_intensity < 0:
raise ValueError("""The value of intensity cannot be negative""" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(UpperCAmelCase_ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
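
# Added illustration (not part of the original file): at 60 degrees a polarizer
# transmits cos^2(60°) = 0.25 of the incident light, so 100.0 units in gives 25.0 out.
assert math.isclose(malus_law(100.0, 60.0), 25.0)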
| 335 | 0 |
"""simple docstring"""
from collections import deque
def lowercase_ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[int] = len(UpperCAmelCase_ )
A_ : Any = deque()
A_ : List[str] = [False for _ in range(UpperCAmelCase_ )]
A_ : int = [-1 for _ in range(UpperCAmelCase_ )]
A_ : str = index_of[:]
def strong_connect(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
A_ : Any = index # the number when this node is seen
A_ : Tuple = index # lowest rank node reachable from here
index += 1
stack.append(UpperCAmelCase_ )
A_ : int = True
for w in g[v]:
if index_of[w] == -1:
A_ : Union[str, Any] = strong_connect(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A_ : Optional[Any] = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A_ : Dict = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A_ : List[Any] = []
A_ : Optional[Any] = stack.pop()
A_ : Optional[Any] = False
component.append(UpperCAmelCase_ )
while w != v:
A_ : Union[str, Any] = stack.pop()
A_ : Optional[Any] = False
component.append(UpperCAmelCase_ )
components.append(UpperCAmelCase_ )
return index
A_ : Optional[Any] = []
for v in range(UpperCAmelCase_ ):
if index_of[v] == -1:
strong_connect(UpperCAmelCase_ , 0 , UpperCAmelCase_ )
return components
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : str = [[] for _ in range(UpperCAmelCase_ )]
for u, v in edges:
g[u].append(UpperCAmelCase_ )
return g
if __name__ == "__main__":
# Test
_lowerCamelCase : int = 7
_lowerCamelCase : List[str] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_lowerCamelCase : Optional[int] = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_lowerCamelCase : Union[str, Any] = [(u, v) for u, v in zip(source, target)]
_lowerCamelCase : Any = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
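
    # Added illustration: a directed graph is acyclic exactly when every strongly
    # connected component found by tarjan() is a single vertex (assuming no self-loops).
    dag = create_graph(3, [(0, 1), (1, 2)])
    assert all(len(component) == 1 for component in tarjan(dag))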
| 167 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 335 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
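
# Illustrative consumer-side note (an addition, not part of the module): with the
# lazy module registered above, an import such as
#
#     from transformers import SqueezeBertConfig, SqueezeBertModel
#
# only loads the torch-backed submodule on first attribute access, which keeps
# `import transformers` cheap when SqueezeBERT is never used.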
| 158 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = FileLock(str(tmpdir / """foo.lock""" ) )
A__ = 0.01
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
A__ = time.time()
locka.acquire(UpperCAmelCase_ )
assert time.time() - _start > timeout
def _snake_case ( UpperCAmelCase_ : List[Any] ):
A__ = """a""" * 1000 + """.lock"""
A__ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(UpperCAmelCase_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
A__ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(UpperCAmelCase_ ):
locka.acquire(0 )
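

def test_lock_released_on_context_exit(tmpdir):
    # Added sanity check (not from the original suite): the context-manager form
    # must release the lock on exit. `is_locked` is assumed to be the public flag
    # exposed by the vendored filelock implementation.
    lock = FileLock(str(tmpdir / "bar.lock"))
    with lock:
        assert lock.is_locked
    assert not lock.is_locked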
| 335 | 0 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
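
    # Hand-checked expectations, added for illustration: the cheapest 1 -> 4 route
    # is 1 -> 3 -> 4 (5 + 6 = 11) and the cheapest 0 -> 3 route is 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16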
| 216 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
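

# Illustrative usage sketch (the image path is a placeholder, not from this file):
#
#     from PIL import Image
#
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(image=Image.open("photo.jpg"), question="How many cats are there?")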
| 335 | 0 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
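

# Minimal usage sketch (shapes and cutoffs here are illustrative, not from this file):
#
#     crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=128, d_proj=128,
#                                        cutoffs=[1000, 5000], div_val=2)
#     hidden = torch.randn(4, 7, 128)            # (batch, seq_len, d_proj)
#     labels = torch.randint(0, 10000, (4, 7))   # next-token targets
#     neg_logprob = crit(hidden, labels)         # per-position NLL; inputs are shifted internally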
| 155 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
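

# Worked example of the expected-shape logic above (added for clarity): with
# size = {"shortest_edge": 18}, a 400x300 (w x h) image is resized so the short
# side becomes 18, giving expected_height = 18 and
# expected_width = int(18 * 400 / 300) = 24.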
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 335 | 0 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 213 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : Dict ): # noqa: E741
A__ = len(UpperCAmelCase_ )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
def dfs(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 335 | 0 |
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : Dict ) -> List[str]: # noqa: E741
lowerCamelCase_ = len(UpperCAmelCase_ )
lowerCamelCase_ = 0
lowerCamelCase_ = [0] * n
lowerCamelCase_ = [False] * n
lowerCamelCase_ = [False] * n
def dfs(_lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict , _lowerCamelCase : List[Any] ):
if parent == root:
out_edge_count += 1
lowerCamelCase_ = True
lowerCamelCase_ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowerCamelCase_ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowerCamelCase_ = True
# AP found via cycle
if at == low[to]:
lowerCamelCase_ = True
else:
lowerCamelCase_ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
lowerCamelCase_ = 0
lowerCamelCase_ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
lowerCamelCase_ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
_SCREAMING_SNAKE_CASE : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
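
# Added note: for the sample graph above, vertices 2, 3 and 5 are the articulation
# points -- removing 2 splits off the 0-1 triangle, removing 3 isolates vertex 4,
# and removing 5 disconnects the 6-7-8 cycle from the rest.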
| 183 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
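

# Minimal sketch of how `get_activation` is typically consumed in a model block
# (class and attribute names here are illustrative, not from this test file):
#
#     class FeedForward(nn.Module):
#         def __init__(self, dim: int, act_fn: str = "gelu"):
#             super().__init__()
#             self.proj = nn.Linear(dim, dim)
#             self.act = get_activation(act_fn)
#
#         def forward(self, x):
#             return self.act(self.proj(x))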
| 335 | 0 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert the PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
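

# A small worked example (added to illustrate the renaming rules above): a
# PyTorch linear weight ("dense", "weight") of shape (out_features, in_features)
# becomes a Flax ("dense", "kernel") of the transposed shape, and a 4D conv
# weight is transposed from OIHW to HWIO layout. Concretely:
#
#   random_flax_state_dict = {("dense", "kernel"): np.zeros((3, 4))}
#   key, tensor = rename_key_and_reshape_tensor(
#       ("dense", "weight"), np.zeros((4, 3)), random_flax_state_dict
#   )
#   # key == ("dense", "kernel"); tensor.shape == (3, 4)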
| 226 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Union[str, Any] , UpperCamelCase: Any ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = RobertaEmbeddings(UpperCamelCase )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = RobertaConfig
UpperCAmelCase = "roberta"
def __init__( self: Optional[Any] , UpperCamelCase: int ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = config.num_hidden_layers
A__ = DeeRobertaModel(UpperCamelCase )
A__ = nn.Dropout(config.hidden_dropout_prob )
A__ = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(UpperCamelCase )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int]=None , UpperCamelCase: str=None , UpperCamelCase: str=None , UpperCamelCase: List[str]=None , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[int]=-1 , UpperCamelCase: Optional[Any]=False , ):
"""simple docstring"""
A__ = self.num_layers
try:
A__ = self.roberta(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , position_ids=UpperCamelCase , head_mask=UpperCamelCase , inputs_embeds=UpperCamelCase , )
A__ = outputs[1]
A__ = self.dropout(UpperCamelCase )
A__ = self.classifier(UpperCamelCase )
A__ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A__ = e.message
A__ = e.exit_layer
A__ = outputs[0]
if not self.training:
A__ = entropy(UpperCamelCase )
A__ = []
A__ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
A__ = []
for highway_exit in outputs[-1]:
A__ = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A__ = MSELoss()
A__ = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
A__ = CrossEntropyLoss()
A__ = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase )
if train_highway:
A__ = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A__ = (loss,) + outputs
if not self.training:
A__ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A__ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
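

# A hedged usage sketch (added; not from the original file): at inference time
# the forward pass appends `((original_entropy, highway_entropy), exit_layer)`
# to the returned tuple, and passing `output_layer=k` swaps in the logits of
# highway exit k. Assuming an initialized model and a tokenized batch:
#
#   model.eval()
#   outputs = model(input_ids, attention_mask=attention_mask,
#                   labels=labels, output_layer=3)
#   loss, exit_logits = outputs[0], outputs[1]
#   (orig_entropy, highway_entropy), exit_layer = outputs[-2], outputs[-1]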
| 335 | 0 |
def solution(pence: int = 200) -> int:
    """Return the number of ways to make `pence` pence from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
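    # An added sanity check on a tiny input: there are 4 ways to make 5 pence
    # (1+1+1+1+1, 1+1+1+2, 1+2+2 and 5), and the same DP reproduces that.
    assert solution(5) == 4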
| 15 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
SCREAMING_SNAKE_CASE_ : int = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
SCREAMING_SNAKE_CASE_ : List[Any] = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _snake_case ( ):
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , bootstrap_aggregation=UpperCAmelCase_ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def _snake_case ( ):
A__ = """rougeLsum"""
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=[k] )[k]
assert score > score_no_sep
def _snake_case ( ):
A__ = ["""rouge1""", """rouge2""", """rougeL"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ , rouge_keys=UpperCAmelCase_ )
assert score_sep == score_no_sep
def _snake_case ( ):
A__ = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
A__ = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ ) == calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , newline_sep=UpperCAmelCase_ )
def _snake_case ( ):
A__ = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
A__ = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase_ )["""rougeLsum"""]
A__ = calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def _snake_case ( ):
A__ = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
A__ = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase_ )
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
| 335 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    """Configuration for an ImageGPT model; follows the GPT-2 naming scheme."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
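

# A minimal added usage sketch: `attribute_map` above aliases common config
# attribute names onto the GPT-2-style ones, e.g.
#
#   config = ImageGPTConfig(n_embd=128, n_layer=2, n_head=4)
#   assert config.hidden_size == config.n_embd == 128
#   assert config.num_hidden_layers == config.n_layer == 2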
| 227 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict=None ):
A__ = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = model.mobilenet_va
else:
A__ = model
A__ = """MobilenetV1/Conv2d_0/"""
A__ = backbone.conv_stem.convolution.weight
A__ = backbone.conv_stem.normalization.bias
A__ = backbone.conv_stem.normalization.weight
A__ = backbone.conv_stem.normalization.running_mean
A__ = backbone.conv_stem.normalization.running_var
for i in range(13 ):
A__ = i + 1
A__ = i * 2
A__ = backbone.layer[pt_index]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
A__ = backbone.layer[pt_index + 1]
A__ = F"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
A__ = pointer.convolution.weight
A__ = pointer.normalization.bias
A__ = pointer.normalization.weight
A__ = pointer.normalization.running_mean
A__ = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
A__ = model.classifier.weight
A__ = model.classifier.bias
return tf_to_pt_map
def _snake_case ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ):
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
A__ = tf.train.list_variables(UpperCAmelCase_ )
A__ = {}
for name, shape in init_vars:
logger.info(F"""Loading TF weight {name} with shape {shape}""" )
A__ = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = array
# Build TF to PyTorch weights loading map
A__ = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"""Importing {name}""" )
if name not in tf_weights:
logger.info(F"""{name} not in tf pre-trained weights, skipping""" )
continue
A__ = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
A__ = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
A__ = array.squeeze().transpose()
else:
A__ = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(F"""Initialize PyTorch weight {name} {array.shape}""" )
A__ = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp""" , UpperCAmelCase_ )
tf_weights.pop(name + """/RMSProp_1""" , UpperCAmelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" , UpperCAmelCase_ )
logger.info(F"""Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}""" )
return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
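

# Worked example (added) for `apply_tf_padding` above: with kernel 3 and
# stride 2, a 7x7 input needs pad_along_height = max(3 - (7 % 2), 0) = 2,
# split evenly into (pad_top, pad_bottom) = (1, 1); an 8x8 input needs
# max(3 - 2, 0) = 1, split as (0, 1) -- when the total is odd, the extra
# pixel goes to the bottom/right, matching TensorFlow's "SAME" convention.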
class a ( nn.Module ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: Optional[int] = 1 , UpperCamelCase: bool = False , UpperCamelCase: Optional[bool] = True , UpperCamelCase: Optional[bool or str] = True , ):
"""simple docstring"""
super().__init__()
A__ = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
A__ = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
A__ = nn.Convad(
in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , stride=UpperCamelCase , padding=UpperCamelCase , groups=UpperCamelCase , bias=UpperCamelCase , padding_mode="""zeros""" , )
if use_normalization:
A__ = nn.BatchNormad(
num_features=UpperCamelCase , eps=config.layer_norm_eps , momentum=0.9_997 , affine=UpperCamelCase , track_running_stats=UpperCamelCase , )
else:
A__ = None
if use_activation:
if isinstance(UpperCamelCase , UpperCamelCase ):
A__ = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCamelCase ):
A__ = ACTaFN[config.hidden_act]
else:
A__ = config.hidden_act
else:
A__ = None
def UpperCamelCase ( self: List[Any] , UpperCamelCase: torch.Tensor ):
"""simple docstring"""
if self.config.tf_padding:
A__ = apply_tf_padding(UpperCamelCase , self.convolution )
A__ = self.convolution(UpperCamelCase )
if self.normalization is not None:
A__ = self.normalization(UpperCamelCase )
if self.activation is not None:
A__ = self.activation(UpperCamelCase )
return features
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = MobileNetVaConfig
UpperCAmelCase = load_tf_weights_in_mobilenet_va
UpperCAmelCase = "mobilenet_v1"
UpperCAmelCase = "pixel_values"
UpperCAmelCase = False
def UpperCamelCase ( self: Any , UpperCamelCase: Union[nn.Linear, nn.Convad] ):
"""simple docstring"""
if isinstance(UpperCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE_ : Optional[Any] = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Any , UpperCamelCase: MobileNetVaConfig , UpperCamelCase: bool = True ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config
A__ = 32
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
A__ = MobileNetVaConvLayer(
UpperCamelCase , in_channels=config.num_channels , out_channels=UpperCamelCase , kernel_size=3 , stride=2 , )
A__ = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
A__ = nn.ModuleList()
for i in range(13 ):
A__ = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
A__ = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=3 , stride=strides[i] , groups=UpperCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCamelCase , in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=1 , ) )
A__ = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: Tuple , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A__ = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A__ = self.conv_stem(UpperCamelCase )
A__ = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
A__ = layer_module(UpperCamelCase )
if output_hidden_states:
A__ = all_hidden_states + (hidden_states,)
A__ = hidden_states
if self.pooler is not None:
A__ = torch.flatten(self.pooler(UpperCamelCase ) , start_dim=1 )
else:
A__ = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCamelCase , pooler_output=UpperCamelCase , hidden_states=UpperCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", _lowerCamelCase, )
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Union[str, Any] , UpperCamelCase: MobileNetVaConfig ):
"""simple docstring"""
super().__init__(UpperCamelCase )
A__ = config.num_labels
A__ = MobileNetVaModel(UpperCamelCase )
A__ = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
A__ = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCamelCase )
A__ = nn.Linear(UpperCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , UpperCamelCase: Optional[torch.Tensor] = None , UpperCamelCase: Optional[bool] = None , ):
"""simple docstring"""
A__ = return_dict if return_dict is not None else self.config.use_return_dict
A__ = self.mobilenet_va(UpperCamelCase , output_hidden_states=UpperCamelCase , return_dict=UpperCamelCase )
A__ = outputs.pooler_output if return_dict else outputs[1]
A__ = self.classifier(self.dropout(UpperCamelCase ) )
A__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A__ = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A__ = """single_label_classification"""
else:
A__ = """multi_label_classification"""
if self.config.problem_type == "regression":
A__ = MSELoss()
if self.num_labels == 1:
A__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
elif self.config.problem_type == "single_label_classification":
A__ = CrossEntropyLoss()
A__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A__ = BCEWithLogitsLoss()
A__ = loss_fct(UpperCamelCase , UpperCamelCase )
if not return_dict:
A__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , )
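

# A hedged usage sketch (added; assumes the upstream transformers names
# AutoImageProcessor and MobileNetV1ForImageClassification for the classes
# defined above):
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()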
| 335 | 0 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__A = logging.getLogger(__name__)
class __lowerCAmelCase ( _lowerCamelCase ):
"""simple docstring"""
snake_case_ = '''sequence-classification'''
def __init__( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
if type(lowerCamelCase__ ) == dict:
__lowerCamelCase = Namespace(**lowerCamelCase__ )
__lowerCamelCase = glue_output_modes[hparams.task]
__lowerCamelCase = glue_tasks_num_labels[hparams.task]
super().__init__(lowerCamelCase__ , lowerCamelCase__ , self.mode )
def lowercase_ ( self , **lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.model(**lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__lowerCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__lowerCamelCase = self(**lowerCamelCase__ )
__lowerCamelCase = outputs[0]
__lowerCamelCase = self.trainer.lr_schedulers[0]['scheduler']
__lowerCamelCase = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = self.hparams
__lowerCamelCase = processors[args.task]()
__lowerCamelCase = processor.get_labels()
for mode in ["train", "dev"]:
__lowerCamelCase = self._feature_file(lowerCamelCase__ )
if os.path.exists(lowerCamelCase__ ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowerCamelCase__ )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
__lowerCamelCase = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
__lowerCamelCase = convert_examples_to_features(
lowerCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowerCamelCase__ )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ) -> Any:
'''simple docstring'''
__lowerCamelCase = 'dev' if mode == 'test' else mode
__lowerCamelCase = self._feature_file(lowerCamelCase__ )
logger.info('Loading features from cached file %s' , lowerCamelCase__ )
__lowerCamelCase = torch.load(lowerCamelCase__ )
__lowerCamelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__lowerCamelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__lowerCamelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__lowerCamelCase = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__lowerCamelCase = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) , batch_size=lowerCamelCase__ , shuffle=lowerCamelCase__ , )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__lowerCamelCase = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
__lowerCamelCase = self(**lowerCamelCase__ )
__lowerCamelCase , __lowerCamelCase = outputs[:2]
__lowerCamelCase = logits.detach().cpu().numpy()
__lowerCamelCase = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowercase_ ( self , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
__lowerCamelCase = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__lowerCamelCase = np.argmax(lowerCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__lowerCamelCase = np.squeeze(lowerCamelCase__ )
__lowerCamelCase = np.concatenate([x['target'] for x in outputs] , axis=0 )
__lowerCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__lowerCamelCase = [[] for _ in range(out_label_ids.shape[0] )]
__lowerCamelCase = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowerCamelCase__ , lowerCamelCase__ )}
__lowerCamelCase = dict(results.items() )
__lowerCamelCase = results
return ret, preds_list, out_label_list
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self._eval_end(lowerCamelCase__ )
__lowerCamelCase = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self._eval_end(lowerCamelCase__ )
__lowerCamelCase = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowercase_ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowerCamelCase__ , lowerCamelCase__ )
parser.add_argument(
'--max_seq_length' , default=128 , type=lowerCamelCase__ , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=lowerCamelCase__ , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def lowerCamelCase_ ( ) -> Optional[int]:
"""simple docstring"""
__lowerCamelCase = argparse.ArgumentParser()
add_generic_args(UpperCAmelCase_ , os.getcwd() )
__lowerCamelCase = GLUETransformer.add_model_specific_args(UpperCAmelCase_ , os.getcwd() )
__lowerCamelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__lowerCamelCase = os.path.join(
'./results' , F"""{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}""" , )
os.makedirs(args.output_dir )
__lowerCamelCase = GLUETransformer(UpperCAmelCase_ )
__lowerCamelCase = generic_train(UpperCAmelCase_ , UpperCAmelCase_ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__lowerCamelCase = sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=UpperCAmelCase_ ) )
__lowerCamelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(UpperCAmelCase_ )
if __name__ == "__main__":
main()
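
# A hedged invocation sketch (added): the flags below are declared in this
# file or in `lightning_base.add_generic_args`; the script name and the paths
# are illustrative guesses only.
#
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results \
#       --gpus 1 --do_predict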
| 90 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
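
    # An added demo on a hypothetical grid: an open 2x2 grid has exactly two
    # corner-to-corner paths (right-down and down-right).
    assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2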
| 335 | 0 |
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase__ = Mapping[str, np.ndarray]
lowercase__ = Mapping[str, Any] # Is a nested dict.
lowercase__ = 0.01
@dataclasses.dataclass(frozen=_lowerCamelCase )
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
lowerCamelCase__ = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
lowerCamelCase__ = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
lowerCamelCase__ = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
lowerCamelCase__ = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
lowerCamelCase__ = None
# Optional remark about the protein. Included as a comment in output PDB
# files
lowerCamelCase__ = None
# Templates used to generate this protein (prediction-only)
lowerCamelCase__ = None
# Chain corresponding to each parent
lowerCamelCase__ = None
def _snake_case ( lowercase__ ):
_lowerCamelCase : Tuple = r'(\[[A-Z]+\]\n)'
_lowerCamelCase : Tuple = [tag.strip() for tag in re.split(UpperCAmelCase_ , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0]
_lowerCamelCase : Optional[int] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
_lowerCamelCase : Tuple = ['N', 'CA', 'C']
_lowerCamelCase : str = None
_lowerCamelCase : Optional[Any] = None
_lowerCamelCase : Optional[int] = None
for g in groups:
if "[PRIMARY]" == g[0]:
_lowerCamelCase : Optional[Any] = g[1][0].strip()
for i in range(len(UpperCAmelCase_ ) ):
if seq[i] not in residue_constants.restypes:
_lowerCamelCase : List[str] = 'X' # FIXME: strings are immutable
_lowerCamelCase : Optional[Any] = np.array(
[residue_constants.restype_order.get(UpperCAmelCase_ , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
_lowerCamelCase : Tuple = []
for axis in range(3 ):
tertiary.append(list(map(UpperCAmelCase_ , g[1][axis].split() ) ) )
_lowerCamelCase : Union[str, Any] = np.array(UpperCAmelCase_ )
_lowerCamelCase : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(UpperCAmelCase_ ):
_lowerCamelCase : Tuple = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
_lowerCamelCase : Optional[int] = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
_lowerCamelCase : Union[str, Any] = np.zeros(
(
len(UpperCAmelCase_ ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(UpperCAmelCase_ ):
_lowerCamelCase : str = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=UpperCAmelCase_ , atom_mask=UpperCAmelCase_ , aatype=UpperCAmelCase_ , residue_index=np.arange(len(UpperCAmelCase_ ) ) , b_factors=UpperCAmelCase_ , )
def _snake_case ( lowercase__ , lowercase__ = 0 ):
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[Any] = prot.remark
if remark is not None:
pdb_headers.append(f'''REMARK {remark}''' )
_lowerCamelCase : List[Any] = prot.parents
_lowerCamelCase : Dict = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
_lowerCamelCase : List[str] = [p for i, p in zip(UpperCAmelCase_ , UpperCAmelCase_ ) if i == chain_id]
if parents is None or len(UpperCAmelCase_ ) == 0:
_lowerCamelCase : List[str] = ['N/A']
pdb_headers.append(f'''PARENT {' '.join(UpperCAmelCase_ )}''' )
return pdb_headers
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : int = []
_lowerCamelCase : str = pdb_str.split('\n' )
_lowerCamelCase : Any = prot.remark
if remark is not None:
out_pdb_lines.append(f'''REMARK {remark}''' )
_lowerCamelCase : str = 42
if prot.parents is not None and len(prot.parents ) > 0:
_lowerCamelCase : Optional[int] = []
if prot.parents_chain_index is not None:
_lowerCamelCase : Optional[int] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(UpperCAmelCase_ ) , [] )
parent_dict[str(UpperCAmelCase_ )].append(UpperCAmelCase_ )
_lowerCamelCase : List[str] = max([int(UpperCAmelCase_ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
_lowerCamelCase : Union[str, Any] = parent_dict.get(str(UpperCAmelCase_ ) , ['N/A'] )
parents_per_chain.append(UpperCAmelCase_ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
_lowerCamelCase : Optional[Any] = [['N/A']]
def make_parent_line(lowercase__ ) -> str:
return f'''PARENT {' '.join(UpperCAmelCase_ )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
_lowerCamelCase : str = 0
for i, l in enumerate(UpperCAmelCase_ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(UpperCAmelCase_ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(UpperCAmelCase_ ):
_lowerCamelCase : Dict = parents_per_chain[chain_counter]
else:
_lowerCamelCase : int = ['N/A']
out_pdb_lines.append(make_parent_line(UpperCAmelCase_ ) )
return "\n".join(UpperCAmelCase_ )
def _snake_case ( lowercase__ ):
_lowerCamelCase : Dict = residue_constants.restypes + ['X']
def res_atoa(lowercase__ ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
_lowerCamelCase : List[Any] = residue_constants.atom_types
_lowerCamelCase : Dict = []
_lowerCamelCase : int = prot.atom_mask
_lowerCamelCase : List[Any] = prot.aatype
_lowerCamelCase : str = prot.atom_positions
_lowerCamelCase : str = prot.residue_index.astype(np.intaa )
_lowerCamelCase : Union[str, Any] = prot.b_factors
_lowerCamelCase : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
_lowerCamelCase : List[str] = get_pdb_headers(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > 0:
pdb_lines.extend(UpperCAmelCase_ )
_lowerCamelCase : Union[str, Any] = aatype.shape[0]
_lowerCamelCase : Tuple = 1
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Dict = string.ascii_uppercase
_lowerCamelCase : Dict = None
# Add all atom sites.
for i in range(UpperCAmelCase_ ):
_lowerCamelCase : List[str] = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(UpperCAmelCase_ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
_lowerCamelCase : Dict = 'ATOM'
_lowerCamelCase : Optional[Any] = atom_name if len(UpperCAmelCase_ ) == 4 else f''' {atom_name}'''
_lowerCamelCase : List[str] = ''
_lowerCamelCase : List[str] = ''
_lowerCamelCase : Optional[Any] = 1.0_0
_lowerCamelCase : List[Any] = atom_name[0] # Protein supports only C, N, O, S, this works.
_lowerCamelCase : str = ''
_lowerCamelCase : str = 'A'
if chain_index is not None:
_lowerCamelCase : Optional[Any] = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
_lowerCamelCase : Any = (
f'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
f'''{res_name_a:>3} {chain_tag:>1}'''
f'''{residue_index[i]:>4}{insertion_code:>1} '''
f'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
f'''{occupancy:>6.2f}{b_factor:>6.2f} '''
f'''{element:>2}{charge:>2}'''
)
pdb_lines.append(UpperCAmelCase_ )
atom_index += 1
_lowerCamelCase : List[Any] = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
_lowerCamelCase : Tuple = True
_lowerCamelCase : int = chain_index[i + 1]
if should_terminate:
# Close the chain.
_lowerCamelCase : Union[str, Any] = 'TER'
_lowerCamelCase : Union[str, Any] = (
f'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(UpperCAmelCase_ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(UpperCAmelCase_ , UpperCAmelCase_ ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(UpperCAmelCase_ )
def _snake_case ( lowercase__ ):
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , ):
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=UpperCAmelCase_ , remark=UpperCAmelCase_ , parents=UpperCAmelCase_ , parents_chain_index=UpperCAmelCase_ , ) | 96 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class lowercase :
def __init__( self : Any , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[str]=None ):
"""simple docstring"""
A_ : str = list(poly_a or [0] )[:]
A_ : int = list(poly_b or [0] )[:]
# Remove leading zero coefficients
while self.polyA[-1] == 0:
self.polyA.pop()
A_ : Union[str, Any] = len(self.polyA )
while self.polyB[-1] == 0:
self.polyB.pop()
A_ : List[str] = len(self.polyB )
# Add 0 to make lengths equal a power of 2
A_ : List[Any] = int(
2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
while len(self.polyA ) < self.c_max_length:
self.polyA.append(0 )
while len(self.polyB ) < self.c_max_length:
self.polyB.append(0 )
# A complex root used for the fourier transform
A_ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
# The product
A_ : Optional[Any] = self.__multiply()
def a_ ( self : Optional[int] , _lowerCamelCase : int ):
"""simple docstring"""
A_ : str = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
# Corner case
if len(_lowerCamelCase ) <= 1:
return dft[0]
#
A_ : List[str] = self.c_max_length // 2
while next_ncol > 0:
A_ : Union[str, Any] = [[] for i in range(_lowerCamelCase )]
A_ : Tuple = self.root**next_ncol
# First half of next step
A_ : List[str] = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowerCamelCase ):
new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
current_root *= root
# Second half of next step
A_ : Dict = 1
for j in range(self.c_max_length // (next_ncol * 2) ):
for i in range(_lowerCamelCase ):
new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
current_root *= root
# Update
A_ : Optional[Any] = new_dft
A_ : Optional[Any] = next_ncol // 2
return dft[0]
def a_ ( self : str ):
"""simple docstring"""
A_ : str = self.__dft('''A''' )
A_ : int = self.__dft('''B''' )
A_ : List[str] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
del dft_a
del dft_b
# Corner Case
if len(inverce_c[0] ) <= 1:
return inverce_c[0]
# Inverse DFT
A_ : Optional[Any] = 2
while next_ncol <= self.c_max_length:
A_ : Optional[int] = [[] for i in range(_lowerCamelCase )]
A_ : Optional[int] = self.root ** (next_ncol // 2)
A_ : Optional[Any] = 1
# First half of next step
for j in range(self.c_max_length // next_ncol ):
for i in range(next_ncol // 2 ):
# Even positions
new_inverse_c[i].append(
(
inverce_c[i][j]
+ inverce_c[i][j + self.c_max_length // next_ncol]
)
/ 2 )
# Odd positions
new_inverse_c[i + next_ncol // 2].append(
(
inverce_c[i][j]
- inverce_c[i][j + self.c_max_length // next_ncol]
)
/ (2 * current_root) )
current_root *= root
# Update
A_ : Tuple = new_inverse_c
next_ncol *= 2
# Unpack
A_ : Dict = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c]
# Remove leading 0's
while inverce_c[-1] == 0:
inverce_c.pop()
return inverce_c
def __str__( self : str ):
"""simple docstring"""
A_ : str = '''A = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyA[: self.len_A] ) )
A_ : List[str] = '''B = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.polyB[: self.len_B] ) )
A_ : Dict = '''A*B = ''' + ''' + '''.join(
F"""{coef}*x^{i}""" for coef, i in enumerate(self.product ) )
return F"""{a}\n{b}\n{c}"""
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
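
    # Added demo: (1 + 2x + 3x^2) * (4 + 5x) = 4 + 13x + 22x^2 + 15x^3; the
    # product's coefficients print in ascending-power order, up to float rounding.
    print(FFT([1, 2, 3], [4, 5]).product)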
| 167 |
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def _snake_case ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
A__ = _distribute_shards(**UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
A__ = _split_gen_kwargs(UpperCAmelCase_ , UpperCAmelCase_ )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] ):
if expected is RuntimeError:
with pytest.raises(UpperCAmelCase_ ):
_number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
else:
A__ = _number_of_shards_in_gen_kwargs(UpperCAmelCase_ )
assert out == expected
| 335 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: try every first-cut length and recurse on the rest."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down) dynamic programming solution."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up) dynamic programming solution."""
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
| 158 |
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE_ : str = parse(importlib.metadata.version('torch'))
def _snake_case ( UpperCAmelCase_ : Union[str, Version] , UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
A__ = STR_OPERATION_TO_FUNC[operation]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ = parse(importlib.metadata.version(UpperCAmelCase_ ) )
return operation(UpperCAmelCase_ , parse(UpperCAmelCase_ ) )
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ):
return compare_versions(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
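

# A hedged usage sketch (added): gate a code path on the installed torch
# version, e.g.
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # use an API introduced in torch 1.12
#
# The accepted operator strings are the keys of `STR_OPERATION_TO_FUNC`.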
| 335 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 216 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE_ : Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE_ : str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE_ : List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int = CHRF.CHAR_ORDER , UpperCamelCase: int = CHRF.WORD_ORDER , UpperCamelCase: int = CHRF.BETA , UpperCamelCase: bool = False , UpperCamelCase: bool = False , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = len(references[0] )
if any(len(UpperCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
A__ = [[refs[i] for refs in references] for i in range(UpperCamelCase )]
A__ = CHRF(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = sb_chrf.corpus_score(UpperCamelCase , UpperCamelCase )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 335 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
a = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
_a = 'tapas'
def __init__( self : str , lowerCAmelCase : str=3_0522 , lowerCAmelCase : int=768 , lowerCAmelCase : Union[str, Any]=12 , lowerCAmelCase : List[str]=12 , lowerCAmelCase : Union[str, Any]=3072 , lowerCAmelCase : str="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : str=0.1 , lowerCAmelCase : Union[str, Any]=1024 , lowerCAmelCase : Tuple=[3, 256, 256, 2, 256, 256, 10] , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[str]=1e-12 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : str=10.0 , lowerCAmelCase : int=0 , lowerCAmelCase : Tuple=1.0 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=1.0 , lowerCAmelCase : int=False , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : int=1.0 , lowerCAmelCase : List[Any]=1.0 , lowerCAmelCase : Dict=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : List[Any]="ratio" , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : List[str]=True , lowerCAmelCase : str=False , lowerCAmelCase : str=False , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : int=False , lowerCAmelCase : str=None , lowerCAmelCase : str=None , **lowerCAmelCase : List[Any] , ):
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_sizes
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
# Fine-tuning task hyperparameters
lowerCAmelCase = positive_label_weight
lowerCAmelCase = num_aggregation_labels
lowerCAmelCase = aggregation_loss_weight
lowerCAmelCase = use_answer_as_supervision
lowerCAmelCase = answer_loss_importance
lowerCAmelCase = use_normalized_answer_loss
lowerCAmelCase = huber_loss_delta
lowerCAmelCase = temperature
lowerCAmelCase = aggregation_temperature
lowerCAmelCase = use_gumbel_for_cells
lowerCAmelCase = use_gumbel_for_aggregation
lowerCAmelCase = average_approximation_function
lowerCAmelCase = cell_selection_preference
lowerCAmelCase = answer_loss_cutoff
lowerCAmelCase = max_num_rows
lowerCAmelCase = max_num_columns
lowerCAmelCase = average_logits_per_cell
lowerCAmelCase = select_one_column
lowerCAmelCase = allow_empty_column_selection
lowerCAmelCase = init_cell_selection_weights_to_zero
lowerCAmelCase = reset_position_index_per_cell
lowerCAmelCase = disable_per_token_loss
# Aggregation hyperparameters
lowerCAmelCase = aggregation_labels
lowerCAmelCase = no_aggregation_label_index
if isinstance(self.aggregation_labels , lowerCAmelCase ):
lowerCAmelCase = {int(lowerCAmelCase ): v for k, v in aggregation_labels.items()}
| 155 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class a ( _lowerCamelCase ):
"""simple docstring"""
def __init__( self: Optional[int] , *UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple=None , UpperCamelCase: Tuple=None , **UpperCamelCase: Dict ):
"""simple docstring"""
super().__init__(*UpperCamelCase , **UpperCamelCase )
A__ = eval_examples
A__ = post_process_function
def UpperCamelCase ( self: Optional[Any] , UpperCamelCase: Optional[Dataset] = None , UpperCamelCase: List[Any]=None , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "eval" , **UpperCamelCase: Optional[int] , ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A__ = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A__ = gen_kwargs
A__ = self.eval_dataset if eval_dataset is None else eval_dataset
A__ = self.get_eval_dataloader(UpperCamelCase )
A__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
else:
A__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase )
return metrics
def UpperCamelCase ( self: List[Any] , UpperCamelCase: Dict , UpperCamelCase: List[str] , UpperCamelCase: Dict=None , UpperCamelCase: str = "test" , **UpperCamelCase: Optional[int] ):
"""simple docstring"""
A__ = gen_kwargs.copy()
A__ = self.get_test_dataloader(UpperCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
A__ = self.compute_metrics
A__ = None
A__ = time.time()
A__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A__ = eval_loop(
UpperCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase , metric_key_prefix=UpperCamelCase , )
finally:
A__ = compute_metrics
A__ = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
UpperCamelCase , UpperCamelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A__ = self.post_process_function(UpperCamelCase , UpperCamelCase , UpperCamelCase , """predict""" )
A__ = self.compute_metrics(UpperCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
A__ = metrics.pop(UpperCamelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase )
| 335 | 0 |
"""simple docstring"""
def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : set ):
lowercase_ , lowercase_ : int = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
lowercase_ : Optional[int] = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 213 |
"""simple docstring"""
class a :
"""simple docstring"""
def __init__( self: Dict ):
"""simple docstring"""
A__ = {}
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase , """ -> """ , """ -> """.join([str(UpperCamelCase ) for j in self.vertex[i]] ) )
def UpperCamelCase ( self: Any , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase )
else:
# else make a new vertex
A__ = [to_vertex]
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: str , UpperCamelCase: int , UpperCamelCase: list ):
"""simple docstring"""
A__ = True
print(UpperCamelCase , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 335 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> Optional[Any]:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def lowerCamelCase__ ( _lowerCamelCase : int ) -> int:
lowerCamelCase_ = []
lowerCamelCase_ = 11
lowerCamelCase_ = int('1' + '0' * digit_len )
for num in range(UpperCAmelCase_ , UpperCAmelCase_ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(UpperCAmelCase_ , UpperCAmelCase_ ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
lowerCamelCase_ = 10
return solutions
def lowerCamelCase__ ( _lowerCamelCase : int = 2 ) -> Optional[int]:
lowerCamelCase_ = 1.0
for fraction in fraction_list(UpperCAmelCase_ ):
lowerCamelCase_ = Fraction(UpperCAmelCase_ )
result *= frac.denominator / frac.numerator
return int(UpperCAmelCase_ )
if __name__ == "__main__":
print(solution())
| 183 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : int = 10 ):
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or n < 0:
raise ValueError("""Invalid input""" )
A__ = 10**n
A__ = 2_8433 * (pow(2 , 783_0457 , UpperCAmelCase_ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(1_0) = }""")
| 335 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A ={
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()["__file__"], _import_structure)
| 226 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple[float, float, float]
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
A__ = end_pointa[0] - end_pointa[0]
A__ = end_pointa[1] - end_pointa[1]
A__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
A__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
A__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
A__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def _snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def _snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 335 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE :Dict = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
SCREAMING_SNAKE_CASE :str = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
SCREAMING_SNAKE_CASE :List[str] = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self : List[str] ):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Value("string" ,id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" ,id="sequence" ) ,id="references" ),
} ) ,codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] ,reference_urls=[
"https://github.com/m-popovic/chrF",
] ,)
def UpperCamelCase_ ( self : List[Any] ,A : Union[str, Any] ,A : Optional[Any] ,A : int = CHRF.CHAR_ORDER ,A : int = CHRF.WORD_ORDER ,A : int = CHRF.BETA ,A : bool = False ,A : bool = False ,A : bool = False ,):
__A = len(references[0] )
if any(len(A ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
__A = [[refs[i] for refs in references] for i in range(A )]
__A = CHRF(A ,A ,A ,A ,A ,A )
__A = sb_chrf.corpus_score(A ,A )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 15 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 335 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase: str = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: List[str] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
_lowercase: Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 227 |
"""simple docstring"""
import math
class a :
"""simple docstring"""
def __init__( self: List[Any] , UpperCamelCase: List[str]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
A__ = n
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # adjacency matrix for weight
A__ = [
[math.inf for j in range(0 , UpperCamelCase )] for i in range(0 , UpperCamelCase )
] # dp[i][j] stores minimum distance from i to j
def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple ):
"""simple docstring"""
A__ = w
def UpperCamelCase ( self: int ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
A__ = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def UpperCamelCase ( self: int , UpperCamelCase: List[str] , UpperCamelCase: Dict ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 335 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__A = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__A = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__lowerCamelCase = bs[:]
__lowerCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase_ )
cs.append(2**8 + n )
n += 1
__lowerCamelCase = [chr(UpperCAmelCase_ ) for n in cs]
return dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = set()
__lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase = char
return pairs
class __lowerCAmelCase ( _lowerCamelCase ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='utf-8' ) as vocab_handle:
__lowerCamelCase = json.load(lowerCamelCase__ )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
__lowerCamelCase = errors # how to handle errors in decoding
__lowerCamelCase = bytes_to_unicode()
__lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='utf-8' ) as merges_handle:
__lowerCamelCase = merges_handle.read().split('\n' )[1:-1]
__lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowerCamelCase = {}
__lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCamelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
__lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase , __lowerCamelCase = bigram
__lowerCamelCase = []
__lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
__lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
__lowerCamelCase = get_pairs(lowerCamelCase__ )
__lowerCamelCase = ' '.join(lowerCamelCase__ )
__lowerCamelCase = word
return word
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
__lowerCamelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(' ' ) )
return bpe_tokens
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = ''.join(lowerCamelCase__ )
__lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '\n' )
__lowerCamelCase = 0
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__lowerCamelCase = token_index
writer.write(' '.join(lowerCamelCase__ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[Any]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> int:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
__lowerCamelCase = ' ' + text
return (text, kwargs)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__lowerCamelCase = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCamelCase = len(encoded_inputs['global_attention_mask'] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
__lowerCamelCase = len(lowerCamelCase__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCamelCase = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCamelCase = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 90 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
"""simple docstring"""
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase__ = logging.getLogger(__name__)
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Any = np.argmax(UpperCAmelCase_ , axis=1 )
return np.sum(outputs == labels )
def _snake_case ( lowercase__ ):
with open(UpperCAmelCase_ , encoding='utf_8' ) as f:
_lowerCamelCase : Optional[int] = csv.reader(UpperCAmelCase_ )
_lowerCamelCase : Any = []
next(UpperCAmelCase_ ) # skip the first line
for line in tqdm(UpperCAmelCase_ ):
output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = []
for dataset in encoded_datasets:
_lowerCamelCase : Dict = len(UpperCAmelCase_ )
_lowerCamelCase : Any = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
_lowerCamelCase : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
_lowerCamelCase : List[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
_lowerCamelCase : Dict = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(UpperCAmelCase_ ):
_lowerCamelCase : Tuple = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_lowerCamelCase : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
_lowerCamelCase : Dict = with_conta
_lowerCamelCase : Tuple = with_conta
_lowerCamelCase : Dict = len(UpperCAmelCase_ ) - 1
_lowerCamelCase : str = len(UpperCAmelCase_ ) - 1
_lowerCamelCase : List[Any] = with_conta
_lowerCamelCase : List[Any] = with_conta
_lowerCamelCase : str = mc_label
_lowerCamelCase : Union[str, Any] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(UpperCAmelCase_ ) for t in all_inputs ) )
return tensor_datasets
def _snake_case ( ):
_lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=UpperCAmelCase_ , default='openai-gpt' , help='pretrained model name' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' )
parser.add_argument(
'--output_dir' , default=UpperCAmelCase_ , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=UpperCAmelCase_ , default='' )
parser.add_argument('--eval_dataset' , type=UpperCAmelCase_ , default='' )
parser.add_argument('--seed' , type=UpperCAmelCase_ , default=42 )
parser.add_argument('--num_train_epochs' , type=UpperCAmelCase_ , default=3 )
parser.add_argument('--train_batch_size' , type=UpperCAmelCase_ , default=8 )
parser.add_argument('--eval_batch_size' , type=UpperCAmelCase_ , default=16 )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=UpperCAmelCase_ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , type=UpperCAmelCase_ , default=1 )
parser.add_argument(
'--max_steps' , default=-1 , type=UpperCAmelCase_ , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=UpperCAmelCase_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=UpperCAmelCase_ , default=6.25E-5 )
parser.add_argument('--warmup_steps' , default=0 , type=UpperCAmelCase_ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--lr_schedule' , type=UpperCAmelCase_ , default='warmup_linear' )
parser.add_argument('--weight_decay' , type=UpperCAmelCase_ , default=0.0_1 )
parser.add_argument('--lm_coef' , type=UpperCAmelCase_ , default=0.9 )
parser.add_argument('--n_valid' , type=UpperCAmelCase_ , default=374 )
parser.add_argument('--server_ip' , type=UpperCAmelCase_ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCAmelCase_ , default='' , help='Can be used for distant debugging.' )
_lowerCamelCase : Any = parser.parse_args()
print(UpperCAmelCase_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCAmelCase_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
_lowerCamelCase : Dict = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
_lowerCamelCase : List[Any] = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
_lowerCamelCase : int = ['_start_', '_delimiter_', '_classify_']
_lowerCamelCase : Optional[Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(UpperCAmelCase_ )
_lowerCamelCase : int = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
_lowerCamelCase : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(UpperCAmelCase_ ) )
model.to(UpperCAmelCase_ )
# Load and encode the datasets
def tokenize_and_encode(lowercase__ ):
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return obj
return [tokenize_and_encode(UpperCAmelCase_ ) for o in obj]
logger.info('Encoding dataset...' )
_lowerCamelCase : Dict = load_rocstories_dataset(args.train_dataset )
_lowerCamelCase : Union[str, Any] = load_rocstories_dataset(args.eval_dataset )
_lowerCamelCase : int = (train_dataset, eval_dataset)
_lowerCamelCase : Tuple = tokenize_and_encode(UpperCAmelCase_ )
# Compute the max input length for the Transformer
_lowerCamelCase : Union[str, Any] = model.config.n_positions // 2 - 2
_lowerCamelCase : List[str] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
_lowerCamelCase : Union[str, Any] = min(UpperCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
_lowerCamelCase : str = pre_process_datasets(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , *UpperCAmelCase_ )
_lowerCamelCase, _lowerCamelCase : int = tensor_datasets[0], tensor_datasets[1]
_lowerCamelCase : Dict = TensorDataset(*UpperCAmelCase_ )
_lowerCamelCase : List[Any] = RandomSampler(UpperCAmelCase_ )
_lowerCamelCase : Union[str, Any] = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.train_batch_size )
_lowerCamelCase : Union[str, Any] = TensorDataset(*UpperCAmelCase_ )
_lowerCamelCase : List[str] = SequentialSampler(UpperCAmelCase_ )
_lowerCamelCase : Union[str, Any] = DataLoader(UpperCAmelCase_ , sampler=UpperCAmelCase_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
_lowerCamelCase : List[str] = args.max_steps
_lowerCamelCase : Dict = args.max_steps // (len(UpperCAmelCase_ ) // args.gradient_accumulation_steps) + 1
else:
_lowerCamelCase : int = len(UpperCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs
_lowerCamelCase : Any = list(model.named_parameters() )
_lowerCamelCase : Union[str, Any] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
_lowerCamelCase : Optional[Any] = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
_lowerCamelCase : Tuple = AdamW(UpperCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon )
_lowerCamelCase : List[Any] = get_linear_schedule_with_warmup(
UpperCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=UpperCAmelCase_ )
if args.do_train:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ):
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Tuple = tqdm(UpperCAmelCase_ , desc='Training' )
for step, batch in enumerate(UpperCAmelCase_ ):
_lowerCamelCase : Optional[Any] = tuple(t.to(UpperCAmelCase_ ) for t in batch )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = batch
_lowerCamelCase : Tuple = model(UpperCAmelCase_ , mc_token_ids=UpperCAmelCase_ , lm_labels=UpperCAmelCase_ , mc_labels=UpperCAmelCase_ )
_lowerCamelCase : str = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
_lowerCamelCase : Any = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
_lowerCamelCase : Optional[Any] = 'Training loss: {:.2e} lr: {:.2e}'.format(UpperCAmelCase_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
_lowerCamelCase : Optional[int] = model.module if hasattr(UpperCAmelCase_ , 'module' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
_lowerCamelCase : List[str] = os.path.join(args.output_dir , UpperCAmelCase_ )
_lowerCamelCase : Any = os.path.join(args.output_dir , UpperCAmelCase_ )
torch.save(model_to_save.state_dict() , UpperCAmelCase_ )
model_to_save.config.to_json_file(UpperCAmelCase_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
_lowerCamelCase : Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
_lowerCamelCase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(UpperCAmelCase_ )
if args.do_eval:
model.eval()
_lowerCamelCase, _lowerCamelCase : int = 0, 0
_lowerCamelCase, _lowerCamelCase : Optional[int] = 0, 0
for batch in tqdm(UpperCAmelCase_ , desc='Evaluating' ):
_lowerCamelCase : List[Any] = tuple(t.to(UpperCAmelCase_ ) for t in batch )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = batch
with torch.no_grad():
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = model(
UpperCAmelCase_ , mc_token_ids=UpperCAmelCase_ , lm_labels=UpperCAmelCase_ , mc_labels=UpperCAmelCase_ )
_lowerCamelCase : str = mc_logits.detach().cpu().numpy()
_lowerCamelCase : Optional[int] = mc_labels.to('cpu' ).numpy()
_lowerCamelCase : Optional[Any] = accuracy(UpperCAmelCase_ , UpperCAmelCase_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
_lowerCamelCase : str = eval_loss / nb_eval_steps
_lowerCamelCase : int = eval_accuracy / nb_eval_examples
_lowerCamelCase : Any = tr_loss / nb_tr_steps if args.do_train else None
_lowerCamelCase : Union[str, Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
_lowerCamelCase : Union[str, Any] = os.path.join(args.output_dir , 'eval_results.txt' )
with open(UpperCAmelCase_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , UpperCAmelCase_ , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
if __name__ == "__main__":
main() | 96 |
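# Hedged sketch (not part of the script above): how the two heads' losses are
# combined each step, mirroring the `args.lm_coef * losses[0] + losses[1]`
# line in the training loop. The loss values below are illustrative
# placeholders, not outputs of the model.
import torch

lm_loss = torch.tensor(2.5)  # hypothetical language-modeling loss for one batch
mc_loss = torch.tensor(0.7)  # hypothetical multiple-choice classification loss
lm_coef = 0.9                # default of the --lm_coef flag above
total_loss = lm_coef * lm_loss + mc_loss  # scalar passed to backward()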
"""simple docstring"""
import math
def _snake_case ( initial_intensity : float , angle : float ):
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""" )
    # handling of angle values outside the allowed 0-360 degree range
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""" )
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
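# Worked example for the function above: light of intensity 100 (arbitrary
# units) through a polarizer at 60 degrees transmits 100 * cos^2(60 deg) = 25,
# up to floating-point rounding.
if __name__ == "__main__":
    print(_snake_case(100.0, 60.0))  # -> ~25.0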
| 335 | 0 |
"""simple docstring"""
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowercase ( _lowerCamelCase , _lowerCamelCase):
@register_to_config
def __init__( self : str , _lowerCamelCase : int = 1_28 , _lowerCamelCase : int = 2_56 , _lowerCamelCase : float = 20_00.0 , _lowerCamelCase : int = 7_68 , _lowerCamelCase : int = 12 , _lowerCamelCase : int = 12 , _lowerCamelCase : int = 64 , _lowerCamelCase : int = 20_48 , _lowerCamelCase : float = 0.1 , ):
"""simple docstring"""
super().__init__()
A_ : int = nn.Sequential(
nn.Linear(_lowerCamelCase , d_model * 4 , bias=_lowerCamelCase ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_lowerCamelCase ) , nn.SiLU() , )
A_ : Any = nn.Embedding(_lowerCamelCase , _lowerCamelCase )
A_ : Optional[Any] = False
A_ : Tuple = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
A_ : int = nn.Dropout(p=_lowerCamelCase )
A_ : str = nn.ModuleList()
for lyr_num in range(_lowerCamelCase ):
# FiLM conditional T5 decoder
A_ : Dict = DecoderLayer(d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase )
self.decoders.append(_lowerCamelCase )
A_ : Optional[int] = TaLayerNorm(_lowerCamelCase )
A_ : Tuple = nn.Dropout(p=_lowerCamelCase )
A_ : Dict = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
def a_ ( self : Dict , _lowerCamelCase : int , _lowerCamelCase : int ):
"""simple docstring"""
A_ : Tuple = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def a_ ( self : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ , A_ , A_ : List[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A_ : Dict = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A_ : Optional[Any] = self.conditioning_emb(_lowerCamelCase ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A_ : Optional[int] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A_ : Dict = torch.broadcast_to(
torch.arange(_lowerCamelCase , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A_ : Dict = self.position_encoding(_lowerCamelCase )
A_ : Optional[Any] = self.continuous_inputs_projection(_lowerCamelCase )
inputs += position_encodings
A_ : Dict = self.dropout(_lowerCamelCase )
# decoder: No padding present.
A_ : Optional[int] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A_ : int = [(x, self.encoder_decoder_mask(_lowerCamelCase , _lowerCamelCase )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A_ : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A_ : str = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A_ : Optional[Any] = lyr(
_lowerCamelCase , conditioning_emb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , encoder_attention_mask=_lowerCamelCase , )[0]
A_ : List[Any] = self.decoder_norm(_lowerCamelCase )
A_ : Optional[int] = self.post_dropout(_lowerCamelCase )
A_ : Union[str, Any] = self.spec_out(_lowerCamelCase )
return spec_out
class lowercase ( nn.Module):
def __init__( self : int , _lowerCamelCase : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Tuple=1E-6 ):
"""simple docstring"""
super().__init__()
A_ : List[Any] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , dropout_rate=_lowerCamelCase ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=_lowerCamelCase , d_kv=_lowerCamelCase , num_heads=_lowerCamelCase , dropout_rate=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase , layer_norm_epsilon=_lowerCamelCase ) )
def a_ ( self : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[Any]=None , ):
"""simple docstring"""
A_ : List[str] = self.layer[0](
_lowerCamelCase , conditioning_emb=_lowerCamelCase , attention_mask=_lowerCamelCase , )
if encoder_hidden_states is not None:
A_ : Optional[int] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
A_ : List[Any] = self.layer[1](
_lowerCamelCase , key_value_states=_lowerCamelCase , attention_mask=_lowerCamelCase , )
# Apply Film Conditional Feed Forward layer
A_ : Optional[int] = self.layer[-1](_lowerCamelCase , _lowerCamelCase )
return (hidden_states,)
class lowercase ( nn.Module):
def __init__( self : Union[str, Any] , _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
super().__init__()
A_ : Tuple = TaLayerNorm(_lowerCamelCase )
A_ : Optional[Any] = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCamelCase )
A_ : List[Any] = Attention(query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , out_bias=_lowerCamelCase , scale_qk=_lowerCamelCase )
A_ : Dict = nn.Dropout(_lowerCamelCase )
def a_ ( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=None , _lowerCamelCase : str=None , ):
"""simple docstring"""
A_ : Optional[int] = self.layer_norm(_lowerCamelCase )
if conditioning_emb is not None:
A_ : Optional[int] = self.FiLMLayer(_lowerCamelCase , _lowerCamelCase )
# Self-attention block
A_ : Dict = self.attention(_lowerCamelCase )
A_ : Optional[Any] = hidden_states + self.dropout(_lowerCamelCase )
return hidden_states
class lowercase ( nn.Module):
def __init__( self : Any , _lowerCamelCase : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str] , _lowerCamelCase : int ):
"""simple docstring"""
super().__init__()
A_ : Dict = Attention(query_dim=_lowerCamelCase , heads=_lowerCamelCase , dim_head=_lowerCamelCase , out_bias=_lowerCamelCase , scale_qk=_lowerCamelCase )
A_ : Tuple = TaLayerNorm(_lowerCamelCase , eps=_lowerCamelCase )
A_ : int = nn.Dropout(_lowerCamelCase )
def a_ ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[Any]=None , ):
"""simple docstring"""
A_ : Tuple = self.layer_norm(_lowerCamelCase )
A_ : str = self.attention(
_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
A_ : Dict = hidden_states + self.dropout(_lowerCamelCase )
return layer_output
class lowercase ( nn.Module):
def __init__( self : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : List[str] ):
"""simple docstring"""
super().__init__()
A_ : List[str] = TaDenseGatedActDense(d_model=_lowerCamelCase , d_ff=_lowerCamelCase , dropout_rate=_lowerCamelCase )
A_ : Tuple = TaFiLMLayer(in_features=d_model * 4 , out_features=_lowerCamelCase )
A_ : List[Any] = TaLayerNorm(_lowerCamelCase , eps=_lowerCamelCase )
A_ : List[str] = nn.Dropout(_lowerCamelCase )
def a_ ( self : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]=None ):
"""simple docstring"""
A_ : Any = self.layer_norm(_lowerCamelCase )
if conditioning_emb is not None:
A_ : str = self.film(_lowerCamelCase , _lowerCamelCase )
A_ : Tuple = self.DenseReluDense(_lowerCamelCase )
A_ : List[str] = hidden_states + self.dropout(_lowerCamelCase )
return hidden_states
class lowercase ( nn.Module):
def __init__( self : str , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Tuple ):
"""simple docstring"""
super().__init__()
A_ : Tuple = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
A_ : int = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
A_ : str = nn.Linear(_lowerCamelCase , _lowerCamelCase , bias=_lowerCamelCase )
A_ : Optional[int] = nn.Dropout(_lowerCamelCase )
A_ : List[str] = NewGELUActivation()
def a_ ( self : Any , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = self.act(self.wi_a(_lowerCamelCase ) )
A_ : str = self.wi_a(_lowerCamelCase )
A_ : Optional[int] = hidden_gelu * hidden_linear
A_ : Dict = self.dropout(_lowerCamelCase )
A_ : str = self.wo(_lowerCamelCase )
return hidden_states
class lowercase ( nn.Module):
def __init__( self : Any , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any]=1E-6 ):
"""simple docstring"""
super().__init__()
A_ : Any = nn.Parameter(torch.ones(_lowerCamelCase ) )
A_ : int = eps
def a_ ( self : Any , _lowerCamelCase : Optional[int] ):
"""simple docstring"""
A_ : str = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=_lowerCamelCase )
A_ : Union[str, Any] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A_ : Optional[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowercase ( nn.Module):
def a_ ( self : Optional[Any] , _lowerCamelCase : torch.Tensor ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(_lowerCamelCase , 3.0 )) ))
class lowercase ( nn.Module):
def __init__( self : Dict , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] ):
"""simple docstring"""
super().__init__()
A_ : int = nn.Linear(_lowerCamelCase , out_features * 2 , bias=_lowerCamelCase )
def a_ ( self : Optional[int] , _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ):
"""simple docstring"""
A_ : Union[str, Any] = self.scale_bias(_lowerCamelCase )
A_ , A_ : Optional[Any] = torch.chunk(_lowerCamelCase , 2 , -1 )
A_ : Any = x * (1 + scale) + shift
return x
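# Standalone sketch of the FiLM modulation implemented by the last class above:
# a conditioning embedding is projected to a (scale, shift) pair and applied as
# x * (1 + scale) + shift. The tensor shapes here are illustrative assumptions,
# not values from the model config.
import torch
from torch import nn

_features = torch.randn(2, 16, 32)         # (batch, seq_len, d_model)
_cond = torch.randn(2, 1, 128)             # conditioning embedding
_proj = nn.Linear(128, 32 * 2, bias=False)
_scale, _shift = torch.chunk(_proj(_cond), 2, dim=-1)
_modulated = _features * (1 + _scale) + _shift  # broadcasts over seq_len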
| 167 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = 1
A__ = 3
A__ = (32, 32)
A__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase )
return image
@property
def UpperCamelCase ( self: int ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
A__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(UpperCamelCase )
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
def extract(*UpperCamelCase: List[str] , **UpperCamelCase: Any ):
class a :
"""simple docstring"""
def __init__( self: Any ):
"""simple docstring"""
A__ = torch.ones([0] )
def UpperCamelCase ( self: Dict , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
self.pixel_values.to(UpperCamelCase )
return self
return Out()
return extract
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
A__ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , )
A__ = output.images
A__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , return_dict=UpperCamelCase , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A__ = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = self.dummy_cond_unet
A__ = PNDMScheduler(skip_prk_steps=UpperCamelCase )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
A__ = 77
A__ = self.dummy_image.to(UpperCamelCase )
# put models in fp16
A__ = unet.half()
A__ = vae.half()
A__ = bert.half()
# make sure here that pndm scheduler skips prk
A__ = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase , scheduler=UpperCamelCase , vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=self.dummy_extractor , )
A__ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase )
A__ = alt_pipe.to(UpperCamelCase )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase )
A__ = """A painting of a squirrel eating a burger"""
A__ = torch.manual_seed(0 )
A__ = alt_pipe(
[prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" , image=UpperCamelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
A__ = init_image.resize((7_60, 5_04) )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
A__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
A__ = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: Any ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
A__ = init_image.resize((7_68, 5_12) )
A__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
A__ = """BAAI/AltDiffusion"""
A__ = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase , safety_checker=UpperCamelCase , )
pipe.to(UpperCamelCase )
pipe.set_progress_bar_config(disable=UpperCamelCase )
pipe.enable_attention_slicing()
A__ = """A fantasy landscape, trending on artstation"""
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=UpperCamelCase , image=UpperCamelCase , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase , output_type="""np""" , )
A__ = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
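# Sketch of how the img2img `strength` argument exercised by the tests above is
# commonly interpreted in diffusers pipelines (an illustration, not the
# pipeline's exact code): it selects what fraction of the scheduler's timesteps
# actually run, starting from a noised version of the input image.
num_inference_steps = 50
strength = 0.75
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
print(f"denoises for {num_inference_steps - t_start} of {num_inference_steps} steps")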
| 335 | 0 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position , value , l_send , r_send , lr_cv , rr_cv , result_pipe ):
    '''simple docstring'''
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0 , 10 ):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value )
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value , temp )
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value )
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value , temp )
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value )
def odd_even_transposition(arr ):
    '''simple docstring'''
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe() )
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1 , len(arr ) - 1 ):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process , args=(
                len(arr ) - 1,
                arr[len(arr ) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr ) - 1],
            ) , ) )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0 , len(arr ) ):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    '''simple docstring'''
    arr = list(range(10 , 0 , -1 ) )
    print("Initial List" )
    print(*arr )
    arr = odd_even_transposition(arr )
    print("Sorted List\n" )
    print(*arr )
if __name__ == "__main__":
    main()
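# For comparison, a single-process odd-even transposition sort with the same
# guarantee the pipe/process version above relies on: after n alternating
# odd/even phases the list is sorted. This sequential sketch is an addition
# for illustration, not part of the parallel implementation.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
assert odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3]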
| 158 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock ( tmpdir ):
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_lock_filename ( tmpdir ):
    filename = """a""" * 1000 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
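# Minimal usage sketch of the FileLock API exercised above: acquire as a
# context manager with a timeout, falling through on contention. The lock
# path is an illustrative assumption.
def _filelock_usage_sketch(path="/tmp/example.lock"):
    lock = FileLock(path)
    try:
        with lock.acquire(timeout=0.5):
            pass  # critical section
    except Timeout:
        pass  # another process holds the lock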
| 335 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node :
    data: int
    next_node: Node | None
class SortedLinkedList :
    def __init__(self : Optional[Any] , snake_case_ : Iterable[int] ):
        self.head: Node | None = None
        for i in sorted(snake_case_ , reverse=True ):
            self.head = Node(i , self.head )
    def __iter__(self : Dict ):
        node = self.head
        while node:
            yield node.data
            node = node.next_node
    def __len__(self : List[Any] ):
        return sum(1 for _ in self )
    def __str__(self : Union[str, Any] ):
        return " -> ".join([str(node ) for node in self] )
def merge_lists ( sll_one : SortedLinkedList , sll_two : SortedLinkedList ):
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
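# Small usage sketch beyond main(): construction sorts the input ascending,
# __len__ counts nodes, and __str__ joins the values.
_demo = SortedLinkedList([3, 1, 2])
assert len(_demo) == 3
assert str(_demo) == "1 -> 2 -> 3"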
| 216 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a ( _lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase = "dandelin/vilt-b32-finetuned-vqa"
UpperCAmelCase = (
"This is a tool that answers a question about an image. It takes an input named `image` which should be the "
"image containing the information, as well as a `question` which should be the question in English. It "
"returns a text that is the answer to the question."
)
UpperCAmelCase = "image_qa"
UpperCAmelCase = AutoProcessor
UpperCAmelCase = AutoModelForVisualQuestionAnswering
UpperCAmelCase = ["image", "text"]
UpperCAmelCase = ["text"]
    def __init__( self: List[str] , *args: Dict , **kwargs: List[str] ):
        """simple docstring"""
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self: str , image: "Image" , question: str ):
        """simple docstring"""
        return self.pre_processor(image , question , return_tensors="""pt""" )
    def forward( self: str , inputs: str ):
        """simple docstring"""
        with torch.no_grad():
            return self.model(**inputs ).logits
    def decode( self: Union[str, Any] , outputs: int ):
        """simple docstring"""
        idx = outputs.argmax(-1 ).item()
        return self.model.config.id2label[idx]
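# Hypothetical end-to-end usage of the tool above (left commented out because
# it downloads model weights; the class is named `a` in this dump and the
# image path is an illustrative assumption):
# from PIL import Image
# tool = a()
# print(tool(Image.open("photo.png"), "How many cats are there?"))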
| 335 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]=7 , lowerCAmelCase : str=3 , lowerCAmelCase : int=30 , lowerCAmelCase : int=400 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=None , lowerCAmelCase : Any=True , lowerCAmelCase : int=[0.5, 0.5, 0.5] , lowerCAmelCase : Any=[0.5, 0.5, 0.5] , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[Any]=1 / 255 , lowerCAmelCase : Tuple=True , ):
lowerCAmelCase = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = num_channels
lowerCAmelCase = min_resolution
lowerCAmelCase = max_resolution
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean
lowerCAmelCase = image_std
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_pad
def __lowercase ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __lowercase ( self : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int=False ):
if not batched:
lowerCAmelCase = image_inputs[0]
if isinstance(lowerCAmelCase , Image.Image ):
lowerCAmelCase , lowerCAmelCase = image.size
else:
lowerCAmelCase , lowerCAmelCase = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase = self.size["""shortest_edge"""]
lowerCAmelCase = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase = self.size["""shortest_edge"""]
lowerCAmelCase = self.size["""shortest_edge"""]
else:
lowerCAmelCase = []
for image in image_inputs:
lowerCAmelCase , lowerCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[0] )[0]
lowerCAmelCase = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase , unittest.TestCase ):
_a = YolosImageProcessor if is_vision_available() else None
def __lowercase ( self : Optional[int] ):
lowerCAmelCase = YolosImageProcessingTester(self )
@property
def __lowercase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , """image_mean""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """image_std""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_normalize""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(lowerCAmelCase , """size""" ) )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
lowerCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
def __lowercase ( self : str ):
pass
def __lowercase ( self : str ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
lowerCAmelCase = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : Tuple ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : str ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCAmelCase = image_processing(lowerCAmelCase , return_tensors="""pt""" ).pixel_values
lowerCAmelCase , lowerCAmelCase = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : str ):
lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
lowerCAmelCase = self.image_processing_class(do_resize=lowerCAmelCase , do_normalize=lowerCAmelCase , do_rescale=lowerCAmelCase )
# create random PyTorch tensors
lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowerCAmelCase = image_processing_a.pad(lowerCAmelCase , return_tensors="""pt""" )
lowerCAmelCase = image_processing_a(lowerCAmelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def __lowercase ( self : str ):
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
lowerCAmelCase = json.loads(f.read() )
lowerCAmelCase = {"""image_id""": 3_9769, """annotations""": target}
# encode them
lowerCAmelCase = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
lowerCAmelCase = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify area
lowerCAmelCase = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase ) )
# verify boxes
lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1e-3 ) )
# verify image_id
lowerCAmelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase ) )
# verify is_crowd
lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase ) )
# verify class_labels
lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase ) )
# verify orig_size
lowerCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase ) )
# verify size
lowerCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase ) )
@slow
def __lowercase ( self : int ):
lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
lowerCAmelCase = json.loads(f.read() )
lowerCAmelCase = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
lowerCAmelCase = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase = YolosImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify area
lowerCAmelCase = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase ) )
# verify boxes
lowerCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase )
lowerCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1e-3 ) )
# verify image_id
lowerCAmelCase = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase ) )
# verify is_crowd
lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase ) )
# verify class_labels
lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase ) )
# verify masks
lowerCAmelCase = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCAmelCase )
# verify orig_size
lowerCAmelCase = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase ) )
# verify size
lowerCAmelCase = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase ) )
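# The aspect-ratio-preserving resize rule that get_expected_values above
# mirrors, as a standalone helper. `_expected_hw` is a hypothetical name, and
# the 1333-pixel longest-edge cap applied by the real processor is omitted
# here for brevity.
def _expected_hw(w, h, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge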
| 155 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ):
"""simple docstring"""
A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ):
"""simple docstring"""
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase , Image.Image ):
A__ , A__ = image.size
else:
A__ , A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["""shortest_edge"""] * h / w )
A__ = self.size["""shortest_edge"""]
elif w > h:
A__ = self.size["""shortest_edge"""]
A__ = int(self.size["""shortest_edge"""] * w / h )
else:
A__ = self.size["""shortest_edge"""]
A__ = self.size["""shortest_edge"""]
else:
A__ = []
for image in image_inputs:
A__ , A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0]
A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( _lowerCamelCase, unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = YolosImageProcessingTester(self )
@property
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(UpperCamelCase , """size""" ) )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase )
def UpperCamelCase ( self: str ):
"""simple docstring"""
pass
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values
A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = self.image_processing_class(**self.image_processor_dict )
A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" )
A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""image_id""": 3_97_69, """annotations""": target}
# encode them
A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
@slow
def UpperCamelCase ( self: int ):
"""simple docstring"""
A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
A__ = json.loads(f.read() )
A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
A__ = YolosImageProcessor(format="""coco_panoptic""" )
A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) )
# verify area
A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase )
A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
| 335 | 0 |
"""simple docstring"""
import enum
import shutil
import sys
__SCREAMING_SNAKE_CASE =shutil.get_terminal_size()
__SCREAMING_SNAKE_CASE ={'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}
class UpperCamelCase ( enum.Enum ):
lowercase = 0
lowercase = 1
def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any="" ):
sys.stdout.write(str(UpperCAmelCase_ ) + end )
sys.stdout.flush()
def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]="" ):
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , UpperCAmelCase_ )
def lowercase__( ):
forceWrite('\r' )
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def lowercase__( ):
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def lowercase__( ):
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
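# Guarded demo of the helpers above: prints a separator line, writes a green
# status message (32 is the ANSI color code for green), then erases it.
if __name__ == "__main__":
    linebreak()
    forceWrite("\n")
    writeColor("working...", 32)
    reset_cursor()
    clear_line()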
| 213 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : Dict ): # noqa: E741
A__ = len(UpperCAmelCase_ )
A__ = 0
A__ = [0] * n
A__ = [False] * n
A__ = [False] * n
def dfs(UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
if parent == root:
out_edge_count += 1
A__ = True
A__ = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
A__ = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
A__ = True
# AP found via cycle
if at == low[to]:
A__ = True
else:
A__ = min(low[at] , UpperCAmelCase_ )
return out_edge_count
for i in range(UpperCAmelCase_ ):
if not visited[i]:
A__ = 0
A__ = dfs(UpperCAmelCase_ , UpperCAmelCase_ , -1 , UpperCAmelCase_ )
A__ = out_edge_count > 1
for x in range(len(UpperCAmelCase_ ) ):
if is_art[x] is True:
print(UpperCAmelCase_ )
# Adjacency list of graph
SCREAMING_SNAKE_CASE_ : Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
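# For the adjacency list above, removing vertex 2, 3, or 5 disconnects the
# graph, so the call should print the articulation points 2, 3 and 5.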
| 335 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase__ ( ) -> Optional[int]:
lowerCamelCase_ = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=UpperCAmelCase_ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=UpperCAmelCase_ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=UpperCAmelCase_ )
return parser.parse_args()
def lowerCamelCase__ ( ) -> int:
lowerCamelCase_ = parse_args()
# Import training_script as a module.
lowerCamelCase_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowerCamelCase_ = script_fpath.stem
lowerCamelCase_ = importlib.import_module(UpperCAmelCase_ )
# Patch sys.argv
lowerCamelCase_ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
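# Example invocation (script path illustrative): spawns 8 TPU processes, each
# running the training script's _mp_fn entry point.
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased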
| 183 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
A__ = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Optional[int] ):
"""simple docstring"""
A__ = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def UpperCamelCase ( self: Any ):
"""simple docstring"""
A__ = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
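# Guarded demo of the helper under test, so importing this module stays
# side-effect free (activation names as exercised above):
if __name__ == "__main__":
    for name in ("swish", "silu", "mish", "gelu"):
        act = get_activation(name)
        print(name, act(torch.tensor([-1.0, 0.0, 1.0])))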
| 335 | 0 |
def optimal_merge_pattern(files: list) -> float:
    """Function to merge all the files with optimum cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
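# Worked example: merging [2, 3, 4] first combines 2 and 3 at cost 5, then 5
# and 4 at cost 9, for a total cost of 14.
if __name__ == "__main__":
    print(optimal_merge_pattern([2, 3, 4]))  # 14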
| 226 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
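# Hedged usage sketch (toy config with random weights; a real run loads a
# fine-tuned checkpoint):
#
#   import torch
#   from transformers import RobertaConfig
#
#   config = RobertaConfig(num_labels=2)
#   model = DeeRobertaForSequenceClassification(config).eval()
#   with torch.no_grad():
#       outputs = model(torch.tensor([[0, 31414, 232, 2]]))
#   # in eval mode the returned tuple ends with
#   # ((original_entropy, highway_entropy), exit_layer), which an early-exit
#   # policy can compare against an entropy threshold.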
| 335 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 15 |
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        """Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
    ]
    tgt = [
        """Margot Frank, died in 1945, a month earlier than previously thought.""",
        """Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
        """ the final seconds on board Flight 9525.""",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        """\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
    ]
    tgt = [
        """ Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    no_aggregation_metrics = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(no_aggregation_metrics, defaultdict)
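# Quick sanity check of the helper under test; in this repo's utils the
# aggregated scores come back as rounded f-measure percentages, so identical
# strings should score 100.0:
if __name__ == "__main__":
    print(calculate_rouge(["the cat sat on the mat"], ["the cat sat on the mat"], rouge_keys=["rouge1"]))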
| 335 | 0 |
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
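# Standalone sketch of the same pipeline outside the test harness (downloads
# the checkpoint, hence left as a comment; labels are illustrative):
#
#   from transformers import pipeline
#
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(waveform, candidate_labels=["Sound of a dog", "Sound of rain"])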
| 227 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'MobileNetV1Config'
# Base docstring
SCREAMING_SNAKE_CASE_ : str = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : List[str] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'google/mobilenet_v1_1.0_224'
SCREAMING_SNAKE_CASE_ : Tuple = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """A map of modules from TF to PyTorch."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints in a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer's input."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                )
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
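# Hedged usage sketch (left as a comment so the module has no import-time side
# effects; the checkpoint name comes from the archive list above):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])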
| 335 | 0 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
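# Hedged usage sketch of the lazy module: configuration symbols import without
# torch, and the model classes are resolved only on first attribute access.
#
#   from transformers.models.autoformer import AutoformerConfig
#
#   config = AutoformerConfig(prediction_length=24)  # prediction_length illustrative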
| 90 |
"""simple docstring"""
def _snake_case ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : set ):
A__ , A__ = len(UpperCAmelCase_ ), len(grid[0] )
if (
min(UpperCAmelCase_ , UpperCAmelCase_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A__ = 0
count += depth_first_search(UpperCAmelCase_ , row + 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , row - 1 , UpperCAmelCase_ , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col + 1 , UpperCAmelCase_ )
count += depth_first_search(UpperCAmelCase_ , UpperCAmelCase_ , col - 1 , UpperCAmelCase_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
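# Worked example: an open 2x2 grid has exactly two simple paths from the
# top-left to the bottom-right cell.
if __name__ == "__main__":
    print(depth_first_search([[0, 0], [0, 0]], 0, 0, set()))  # 2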
| 335 | 0 |