import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            # The wrapped optimizer must survive a pickle round trip.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
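# A related usage sketch (an illustration, not part of the test above):
# `accelerator.prepare` also accepts several objects at once and returns them
# in the same order they were passed in, e.g.
#
#     model, optimizer = accelerator.prepare(model, optimizer)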
"""Tests for the packaged `Csv` builder of the `datasets` library."""
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    # The trailing comma on the last row makes the file unparseable.
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
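# A minimal sketch of the equivalent public API the tests above exercise
# internally; the file name is a hypothetical placeholder for a CSV shaped
# like the `csv_file_with_label` fixture:
def example_load_csv_with_label():
    from datasets import ClassLabel, Features, load_dataset

    features = Features({"label": ClassLabel(names=["good", "bad"])})
    dataset = load_dataset("csv", data_files="csv_with_label.csv", features=features, split="train")
    return dataset[0]["label"]  # decoded to a class index, e.g. 0 for "good"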
"""A keyboard-driven bullet menu for the accelerate CLI."""
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the choice at the given index, highlighting the current position."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor up or down, redrawing the affected rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Start the menu and return the selected index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
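# A minimal usage sketch (assumes an interactive terminal; `handle_input` is
# provided by the `input.register` machinery imported above):
#
#     menu = BulletMenu("Which device do you want to train on?", ["CPU", "GPU", "TPU"])
#     selected_index = menu.run(default_choice=1)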
"""Lazy import structure for the Blenderbot model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
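# A short note on the pattern above: at runtime `_LazyModule` replaces this
# module in `sys.modules`, so each submodule listed in `_import_structure` is
# only imported on first attribute access. A minimal sketch of the effect:
#
#     from transformers.models.blenderbot import BlenderbotConfig
#     # only `configuration_blenderbot` is imported here; the torch/TF/Flax
#     # modeling files stay untouched until one of their classes is requested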
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
SCREAMING_SNAKE_CASE__ = f"""https://www.google.com/search?q={query}&num=100"""
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)['url'][0]
webbrowser.open(link)
"""Tests for the TensorFlow LayoutLMv3 model."""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
        TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: the second corner (x2, y2) must not precede the first (x1, y1)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)

        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
"""Utilities to map model test files to their test, model, and model tester classes."""
import importlib
import os
import sys


# This is required to make the module import work (when the python process is running from the root of the repo)
sys.path.append(".")


def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in a model test file that use a given model class."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes associated with a given model class in a model test file."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class to the test classes that use it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class to the model tester classes associated with it."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the information succinct and JSON-serializable: class objects become their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class __lowerCamelCase ( lowerCAmelCase_ ):
'''simple docstring'''
A_ : Optional[int] = 'umt5'
A_ : str = ['past_key_values']
def __init__( self , __UpperCAmelCase=250112 , __UpperCAmelCase=512 , __UpperCAmelCase=64 , __UpperCAmelCase=1024 , __UpperCAmelCase=8 , __UpperCAmelCase=None , __UpperCAmelCase=6 , __UpperCAmelCase=32 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=1.0 , __UpperCAmelCase="gated-gelu" , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="T5Tokenizer" , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , **__UpperCAmelCase , ) -> Optional[Any]:
super().__init__(
is_encoder_decoder=__a , tokenizer_class=__a , tie_word_embeddings=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
_a = vocab_size
_a = d_model
_a = d_kv
_a = d_ff
_a = num_layers
_a = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_a = num_heads
_a = relative_attention_num_buckets
_a = relative_attention_max_distance
_a = dropout_rate
_a = layer_norm_epsilon
_a = initializer_factor
_a = feed_forward_proj
_a = use_cache
_a = self.feed_forward_proj.split('''-''' )
_a = act_info[-1]
_a = act_info[0] == 'gated'
if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
_a = 'gelu_new'
@property
def _UpperCAmelCase ( self ) -> str:
return self.d_model
@property
def _UpperCAmelCase ( self ) -> Optional[int]:
return self.num_heads
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.num_layers
class __lowerCamelCase ( lowerCAmelCase_ ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def _UpperCAmelCase ( self ) -> Optional[Any]:
_a = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
_a = 'past_encoder_sequence + sequence'
_a = {0: 'batch'}
_a = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_a = {0: 'batch', 1: 'decoder_sequence'}
_a = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__a , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def _UpperCAmelCase ( self ) -> Tuple:
return 13
@property
def _UpperCAmelCase ( self ) -> Optional[Any]:
return 5e-4 | 320 |
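# A minimal usage sketch of the configuration above (instantiating a config
# does not require model weights; the sizes here are illustrative):
#
#     config = UMT5Config(d_model=256, num_layers=4, num_heads=4)
#     assert config.hidden_size == 256  # alias property defined above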
"""Tests for the Stable Diffusion inpainting pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Decorator flagging a callable as experimental: a warning is emitted on every call."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
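# A minimal usage sketch of the decorator above (the function name is illustrative):
#
#     @experimental
#     def fancy_new_api(x):
#         return x * 2
#
#     fancy_new_api(3)  # returns 6 and emits the "experimental" warning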
"""Thin wrappers around the OpenWeatherMap REST API."""
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _SCREAMING_SNAKE_CASE ) == "1":
lowerCAmelCase__ : Optional[int] = 2
# New Code #
lowerCAmelCase__ : Union[str, Any] = int(args.gradient_accumulation_steps )
lowerCAmelCase__ : Dict = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase__ : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_SCREAMING_SNAKE_CASE )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Dict = config['lr']
lowerCAmelCase__ : Any = int(config['num_epochs'] )
lowerCAmelCase__ : int = int(config['seed'] )
lowerCAmelCase__ : List[str] = int(config['batch_size'] )
lowerCAmelCase__ : Any = evaluate.load('glue' , 'mrpc' )
set_seed(_SCREAMING_SNAKE_CASE )
    train_dataloader , eval_dataloader = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : Dict = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : int = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowerCAmelCase__ : Any = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
with LocalSGD(
accelerator=_SCREAMING_SNAKE_CASE , model=_SCREAMING_SNAKE_CASE , local_sgd_steps=_SCREAMING_SNAKE_CASE , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ : List[Any] = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = output.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : Dict = model(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = outputs.logits.argmax(dim=-1 )
lowerCAmelCase__ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _SCREAMING_SNAKE_CASE )
def main( ) -> Tuple:
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
        'and an Nvidia Ampere GPU.' , )
# New Code #
parser.add_argument(
        '--gradient_accumulation_steps' , type=_SCREAMING_SNAKE_CASE , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , )
parser.add_argument(
'--local_sgd_steps' , type=_SCREAMING_SNAKE_CASE , default=8 , help='Number of local SGD steps or None to disable local SGD' )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
lowerCAmelCase__ : Optional[Any] = parser.parse_args()
lowerCAmelCase__ : str = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main() | 212 |
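# A minimal, self-contained sketch of the LocalSGD + gradient-accumulation
# pattern the script above uses, on a toy regression task instead of
# GLUE/MRPC. The toy model, data, and hyper-parameters are illustrative
# assumptions, not part of the original script.
import torch as _torch
from torch.utils.data import DataLoader as _DataLoader, TensorDataset as _TensorDataset
from accelerate import Accelerator as _Accelerator
from accelerate.local_sgd import LocalSGD as _LocalSGD

def _local_sgd_sketch():
    accelerator = _Accelerator(gradient_accumulation_steps=2)
    model = _torch.nn.Linear(4, 1)
    optimizer = _torch.optim.AdamW(model.parameters(), lr=1e-3)
    loader = _DataLoader(_TensorDataset(_torch.randn(64, 4), _torch.randn(64, 1)), batch_size=8)
    model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
    model.train()
    # Gradients are synchronized across workers only every `local_sgd_steps`
    # optimizer steps; in between, each worker trains on its own shard.
    with _LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
        for x, y in loader:
            with accelerator.accumulate(model):
                loss = _torch.nn.functional.mse_loss(model(x), y)
                accelerator.backward(loss)
                optimizer.step()
                optimizer.zero_grad()
                local_sgd.step()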
'''simple docstring'''
import torch
from transformers import AutoModel
class __UpperCamelCase ( torch.nn.Module ):
def __init__( self , __a="sayef/fsner-bert-base-uncased" ):
'''simple docstring'''
        super().__init__()
        self.bert = AutoModel.from_pretrained(__a , return_dict=__a )
        self.cos = torch.nn.CosineSimilarity(3 , 1E-0_8 )
        self.softmax = torch.nn.Softmax(dim=1 )
    def BERT( self , **__a ):
'''simple docstring'''
return self.bert(**__a ).last_hidden_state
    def VectorSum( self , token_embeddings ):
        '''simple docstring'''
        return token_embeddings.sum(2 , keepdim=True )
    def VectorSimilarity( self , __a , __b , T=1 ):
        '''simple docstring'''
        return self.softmax(T * self.cos(__a , __b ) )
    def forward( self , W_query , W_supports ):
        '''simple docstring'''
        support_sizes = W_supports['sizes'].tolist()
        start_token_id = W_supports['start_token_id'].item()
        end_token_id = W_supports['end_token_id'].item()
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        q = self.BERT(**W_query )
        S = self.BERT(**W_supports )
        p_starts = None
        p_ends = None
        start_token_masks = W_supports['input_ids'] == start_token_id
        end_token_masks = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(support_sizes ):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]
            p_start = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 )
            p_end = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 )
            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start) )
                p_ends = torch.vstack((p_ends, p_end) )
            else:
                p_starts = p_start
                p_ends = p_end
return p_starts, p_ends
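# A torch-only sketch of the start/end-token scoring idea in the forward
# pass above: query token embeddings are matched against the embeddings of
# start/end marker tokens from the support set, and the summed similarities
# are normalised into a probability over query positions. Shapes and values
# here are made up for illustration.
import torch as _t

def _span_probs(query_emb, marker_emb):
    # query_emb: [seq_len, hidden], marker_emb: [num_markers, hidden]
    scores = _t.matmul(query_emb, marker_emb.T)  # [seq_len, num_markers]
    return scores.sum(1).softmax(0)              # probability over positions

_q = _t.randn(10, 16)   # 10 query tokens, hidden size 16
_m = _t.randn(3, 16)    # 3 marker-token embeddings from the support set
print(_span_probs(_q, _m).shape)  # torch.Size([10])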
| 27 | 0 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType ( ExplicitEnum ):
    '''simple docstring'''
    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
lowerCamelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __magic_name__ ( ProcessorMixin ):
'''simple docstring'''
lowerCamelCase__ : Any = ['image_processor', 'char_tokenizer']
lowerCamelCase__ : Any = 'ViTImageProcessor'
lowerCamelCase__ : Optional[Any] = 'MgpstrTokenizer'
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ) -> int:
        """simple docstring"""
        feature_extractor =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''', FutureWarning, )
            feature_extractor =kwargs.pop('''feature_extractor''' )
        image_processor =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        self.char_tokenizer =tokenizer
        self.bpe_tokenizer =AutoTokenizer.from_pretrained('''gpt2''' )
        self.wp_tokenizer =AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(image_processor, tokenizer )
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs ) -> Optional[Any]:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            inputs =self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None:
            encodings =self.char_tokenizer(text, return_tensors=return_tensors, **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] =encodings['input_ids']
return inputs
    def batch_decode( self, sequences ) -> Dict:
        """simple docstring"""
        char_preds, bpe_preds, wp_preds =sequences
        batch_size =char_preds.size(0 )
        char_strs, char_scores =self._decode_helper(char_preds, '''char''' )
        bpe_strs, bpe_scores =self._decode_helper(bpe_preds, '''bpe''' )
        wp_strs, wp_scores =self._decode_helper(wp_preds, '''wp''' )
        final_strs =[]
        final_scores =[]
        for i in range(batch_size ):
            scores =[char_scores[i], bpe_scores[i], wp_scores[i]]
            strs =[char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index =scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out ={}
        out['generated_text'] =final_strs
        out['scores'] =final_scores
        out['char_preds'] =char_strs
        out['bpe_preds'] =bpe_strs
        out['wp_preds'] =wp_strs
        return out
    def _decode_helper( self, pred_logits, format ) -> Optional[int]:
        """simple docstring"""
        if format == DecodeType.CHARACTER:
            decoder =self.char_decode
            eos_token =1
            eos_str ='[s]'
        elif format == DecodeType.BPE:
            decoder =self.bpe_decode
            eos_token =2
            eos_str ='#'
        elif format == DecodeType.WORDPIECE:
            decoder =self.wp_decode
            eos_token =102
            eos_str ='[SEP]'
        else:
            raise ValueError(F"""Format {format} is not supported.""" )
        dec_strs, conf_scores =[], []
        batch_size =pred_logits.size(0 )
        batch_max_length =pred_logits.size(1 )
        _, preds_index =pred_logits.topk(1, dim=-1, largest=True, sorted=True )
        preds_index =preds_index.view(-1, batch_max_length )[:, 1:]
        preds_str =decoder(preds_index )
        preds_max_prob, _ =torch.nn.functional.softmax(pred_logits, dim=2 ).max(dim=2 )
        preds_max_prob =preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos =preds_str[index].find(eos_str )
            pred =preds_str[index][:pred_eos]
            pred_index =preds_index[index].cpu().tolist()
            pred_eos_index =pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob =preds_max_prob[index][: pred_eos_index + 1]
            confidence_score =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
return dec_strs, conf_scores
    def char_decode( self, sequences ) -> List[Any]:
        """simple docstring"""
        decode_strs =[seq.replace(''' ''', '''''' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self, sequences ) -> List[Any]:
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self, sequences ) -> Tuple:
        """simple docstring"""
        decode_strs =[seq.replace(''' ''', '''''' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
return decode_strs
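# The batch_decode above fuses three decoding heads (char, BPE, wordpiece)
# by keeping, per image, the string whose head reports the highest
# confidence. A plain-Python sketch of that selection rule, with made-up
# candidate strings and scores:
_char_out = [("hello", 0.91), ("w0rld", 0.40)]
_bpe_out = [("helo", 0.55), ("world", 0.88)]
_wp_out = [("hell0", 0.60), ("worlb", 0.40)]
for _candidates in zip(_char_out, _bpe_out, _wp_out):
    _best = max(_candidates, key=lambda sc: sc[1])
    print(_best[0], _best[1])
# -> hello 0.91
# -> world 0.88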
| 188 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number : int ):
    sq : int = int(number**0.5 )
return number == sq * sq
def add_three(x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int ):
    top : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom : int = x_den * y_den * z_den
    hcf : int = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order : int = 35 ):
    unique_s : set = set()
    hcf : int
    total : Fraction = Fraction(0 )
    fraction_sum : tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
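# The search above repeatedly reduces a candidate fraction z_num/z_den by
# its gcd before testing 0 < z_num < z_den <= order. A tiny sketch of that
# reduction step with the stdlib, on made-up numbers:
from fractions import Fraction as _Fraction
from math import gcd as _gcd

_zn, _zd = 6, 8
_h = _gcd(_zn, _zd)
print(_zn // _h, _zd // _h)   # 3 4
print(_Fraction(6, 8))        # Fraction(3, 4): Fraction reduces automatically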
| 27 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionInpaintPipeline
__SCREAMING_SNAKE_CASE : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : str = frozenset([] )
    def get_dummy_components( self ):
torch.manual_seed(0 )
snake_case_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
snake_case_ = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
snake_case_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
snake_case_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
snake_case_ = CLIPTextModel(__a )
snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
snake_case_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
snake_case_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case_ = Image.fromarray(np.uint8(__a ) ).convert('RGB' ).resize((64, 64) )
        snake_case_ = Image.fromarray(np.uint8(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(__a ).startswith('mps' ):
snake_case_ = torch.manual_seed(__a )
else:
snake_case_ = torch.Generator(device=__a ).manual_seed(__a )
snake_case_ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
    def test_stable_diffusion_inpaint( self ):
snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
snake_case_ = self.get_dummy_components()
snake_case_ = StableDiffusionInpaintPipeline(**__a )
snake_case_ = sd_pipe.to(__a )
sd_pipe.set_progress_bar_config(disable=__a )
snake_case_ = self.get_dummy_inputs(__a )
snake_case_ = sd_pipe(**__a ).images
snake_case_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case_ = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_single_identical( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(__a , safety_checker=__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16( self ):
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
snake_case_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
            __a , torch_dtype=torch.float16 , safety_checker=__a , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , output_type='np' , )
snake_case_ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
    def test_stable_diffusion_inpaint_pipeline_pndm( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
snake_case_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
snake_case_ = 'stabilityai/stable-diffusion-2-inpainting'
snake_case_ = PNDMScheduler.from_pretrained(__a , subfolder='scheduler' )
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
            __a , safety_checker=__a , scheduler=__a , torch_dtype=torch.float16 , )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench'
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=__a , image=__a , mask_image=__a , generator=__a , num_inference_steps=2 , output_type='np' , )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
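# The fast tests above feed the pipeline a tiny random image plus a shifted
# copy of it as the mask. A standalone sketch of that input construction,
# at the tests' 64x64 size; no diffusers install is required, and the clip
# is an assumption to keep the toy values in range.
import numpy as _np
from PIL import Image as _Image

_rgb = (_np.random.rand(32, 32, 3) * 255).astype(_np.uint8)
_init_image = _Image.fromarray(_rgb).convert("RGB").resize((64, 64))
_mask_image = _Image.fromarray(
    _np.clip(_rgb.astype(_np.int16) + 4, 0, 255).astype(_np.uint8)
).convert("RGB").resize((64, 64))
print(_init_image.size, _mask_image.size)  # (64, 64) (64, 64)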
| 285 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    @property
    def gpu_provider( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
    @property
    def gpu_options( self ):
        '''simple docstring'''
        options : Dict = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference( self ):
'''simple docstring'''
__a : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
__a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
__a : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
# using the PNDM scheduler by default
__a : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=__a , feature_extractor=__a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__a )
__a : Tuple = 'A red cat sitting on a park bench'
__a : int = np.random.RandomState(0 )
__a : Tuple = pipe(
prompt=__a , image=__a , mask_image=__a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=__a , output_type='np' , )
__a : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-2
| 27 | 0 |
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: " )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input("Encryption/Decryption [e/d]: " )
    if mode.lower().startswith("e" ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def encrypt_message(key : int , message : str ) -> str:
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def decrypt_message(key : int , message : str ) -> str:
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
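# A round-trip check for the columnar transposition cipher above:
# decrypt_message(key, encrypt_message(key, msg)) should return the original
# message for any key in [2, len(msg) - 1]. The sample message is made up.
_msg = "Common sense is not so common."
for _k in range(2, len(_msg) - 1):
    assert decrypt_message(_k, encrypt_message(_k, _msg)) == _msg
print("round-trip ok")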
| 202 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Dict = 16
__lowercase : List[Any] = 32
def bamb(x : int ):
    return int(x / 2**20 )
class TorchTracemalloc :
def __enter__( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self , *__a ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator : Accelerator , batch_size : int = 16 , model_name : str = "bert-base-cased" , n_train : int = 320 , n_val : int = 160 , ):
__a : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
__a : List[Any] = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
    def tokenize_function(examples : Tuple ):
        # max_length=None => use the model max length (it's actually the default)
        outputs : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a : List[str] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : Tuple = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__a : int = DataLoader(
tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__a : Tuple = DataLoader(
tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def training_function(config : int , args : int ):
# Initialize accelerator
__a : str = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Dict = config['lr']
__a : str = int(config['num_epochs'] )
__a : Optional[int] = int(config['seed'] )
__a : Any = int(config['batch_size'] )
__a : List[str] = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
    train_dataloader , eval_dataloader = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
__a : Optional[Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__a : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
__a : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__a : Union[str, Any] = 1
__a : Tuple = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__a : str = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
__a : List[Any] = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model , optimizer , train_dataloader , eval_dataloader , lr_scheduler = accelerator.prepare(
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__a : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
__a : Dict = 0
# Now we train the model
__a : str = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a : List[Any] = model(**_SCREAMING_SNAKE_CASE )
__a : str = outputs.loss
__a : str = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__a : List[Any] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def main():
__a : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , )
__a : List[str] = parser.parse_args()
__a : List[Any] = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
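# A CPU analogue of the TorchTracemalloc context manager above, built on the
# stdlib tracemalloc module so it runs without a GPU. The field names mirror
# the class above; the measured workload is a made-up allocation.
import tracemalloc as _tm

class _CpuTraceMalloc:
    def __enter__(self):
        _tm.start()
        self.begin = _tm.get_traced_memory()[0]
        return self

    def __exit__(self, *exc):
        current, peak = _tm.get_traced_memory()
        _tm.stop()
        self.used = (current - self.begin) >> 20    # MiB consumed (end - begin)
        self.peaked = (peak - self.begin) >> 20     # MiB peak delta

with _CpuTraceMalloc() as _trace:
    _buf = [0] * 1_000_000
print(_trace.used, _trace.peaked)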
| 27 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
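# The file above registers submodules with transformers' _LazyModule so
# heavy imports happen only on first attribute access. The same idea in
# plain Python uses a PEP 562 module-level __getattr__; a minimal sketch,
# using the stdlib json module as a stand-in for a heavy submodule:
import importlib as _importlib

_lazy_structure = {"json": ["dumps", "loads"]}  # submodule -> public names

def __getattr__(name):
    # Resolve the owning submodule lazily, import it, and return the symbol.
    for _module_name, _exported in _lazy_structure.items():
        if name in _exported:
            return getattr(_importlib.import_module(_module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")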
| 121 |
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
MODEL_TYPE = 'bart'
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def load_models():
if LOAD_DENSE_INDEX:
__a : List[Any] = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased' )
__a : Dict = AutoModel.from_pretrained('yjernite/retribert-base-uncased' ).to('cuda:0' )
__a : Optional[int] = qar_model.eval()
else:
        __a , __a = (None, None)
if MODEL_TYPE == "bart":
__a : Union[str, Any] = AutoTokenizer.from_pretrained('yjernite/bart_eli5' )
__a : int = AutoModelForSeqaSeqLM.from_pretrained('yjernite/bart_eli5' ).to('cuda:0' )
__a : Optional[Any] = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth' )
sas_model.load_state_dict(save_dict['model'] )
__a : str = sas_model.eval()
else:
        __a , __a = make_qa_sas_model(
model_name='t5-small' , from_file='seq2seq_models/eli5_t5_model_1024_4.pth' , device='cuda:0' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def load_indexes():
if LOAD_DENSE_INDEX:
__a : Optional[Any] = faiss.StandardGpuResources()
__a : Dict = datasets.load_dataset(path='wiki_snippets' , name='wiki40b_en_100_0' )['train']
__a : int = np.memmap(
'wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat' , dtype='float32' , mode='r' , shape=(wikiaab_passages.num_rows, 128) , )
__a : int = faiss.IndexFlatIP(128 )
__a : Any = faiss.index_cpu_to_gpu(_SCREAMING_SNAKE_CASE , 1 , _SCREAMING_SNAKE_CASE )
wikiaab_gpu_index_flat.add(_SCREAMING_SNAKE_CASE ) # TODO fix for larger GPU
else:
        __a , __a = (None, None)
__a : Optional[int] = Elasticsearch([{'host': 'localhost', 'port': '9200'}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_SCREAMING_SNAKE_CASE )
def load_train_data():
__a : Dict = datasets.load_dataset('eli5' , name='LFQA_reddit' )
__a : Dict = elia['train_eli5']
__a : Optional[int] = np.memmap(
'eli5_questions_reps.dat' , dtype='float32' , mode='r' , shape=(elia_train.num_rows, 128) )
__a : str = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(_SCREAMING_SNAKE_CASE )
return (elia_train, eli5_train_q_index)
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training(question , n_results=10 ):
    q_rep = embed_questions_for_retrieval([question] , qar_tokenizer , qar_model )
    D , I = eli5_train_q_index.search(q_rep , n_results )
    nn_examples = [elia_train[int(i )] for i in I[0]]
return nn_examples
def make_support(question , source="wiki40b" , method="dense" , n_results=10 ):
    if source == "none":
        support_doc , hit_lst = (' <P> '.join(['' for _ in range(11 )] ).strip(), [])
    else:
        if method == "dense":
            support_doc , hit_lst = query_qa_dense_index(
                question , qar_model , qar_tokenizer , wikiaab_passages , wikiaab_gpu_index_flat , n_results )
        else:
            support_doc , hit_lst = query_es_index(
                question , es_client , index_name='english_wiki40b_snippets_100w' , n_results=n_results , )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question , support_doc )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _SCREAMING_SNAKE_CASE : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _SCREAMING_SNAKE_CASE : None),
} )
def answer_question(question_doc , sas_model , sas_tokenizer , min_len=64 , max_len=256 , sampling=False , n_beams=2 , top_p=0.9_5 , temp=0.8 ):
with torch.no_grad():
        answer : Union[str, Any] = qa_sas_generate(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_answers=1 , num_beams=_SCREAMING_SNAKE_CASE , min_len=_SCREAMING_SNAKE_CASE , max_len=_SCREAMING_SNAKE_CASE , do_sample=_SCREAMING_SNAKE_CASE , temp=_SCREAMING_SNAKE_CASE , top_p=_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE , max_input_length=1_024 , device='cuda:0' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
__lowercase : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
__lowercase : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : str = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
__lowercase : Union[str, Any] = st.sidebar.checkbox('Demo options')
if demo_options:
__lowercase : Any = st.sidebar.selectbox(
'',
action_list,
index=3,
)
__lowercase : Tuple = action_list.index(action_st)
__lowercase : Tuple = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
__lowercase : List[Any] = show_type == 'Show full text of passages'
else:
__lowercase : int = 3
__lowercase : str = True
__lowercase : Tuple = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
__lowercase : List[Any] = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
__lowercase : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
__lowercase : str = 'wiki40b'
__lowercase : List[Any] = 'dense'
__lowercase : Dict = 'beam'
__lowercase : Optional[int] = 2
__lowercase : List[str] = 64
__lowercase : Tuple = 2_56
__lowercase : List[str] = None
__lowercase : Tuple = None
__lowercase : List[Any] = st.sidebar.checkbox('Generation options')
if generate_options:
__lowercase : Optional[Any] = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
__lowercase : List[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
__lowercase : Tuple = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
__lowercase : int = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : Any = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Dict = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : List[str] = None
# start main text
questions_list = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
question_s = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input('Enter your question here:', '')
else:
    question = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
        question_doc , support_list_dense = make_support(question, source=wiki_source, method='dense', n_results=10)
        question_doc , support_list_sparse = make_support(question, source=wiki_source, method='sparse', n_results=10)
        support_list = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
        support_list = support_list[:10]
        question_doc = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
        question_doc , support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
        answer , support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
            wiki_url = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_'))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = '[{}]({})'.format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(' & ')
                sections = ' & '.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
        answers_st = [
'{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
disclaimer = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
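# The demo's dense retriever is max-inner-product search over precomputed
# 128-d passage embeddings (FAISS IndexFlatIP). A numpy-only sketch of the
# same operation, with random vectors standing in for the real embeddings:
import numpy as _np

_passages = _np.random.randn(1000, 128).astype(_np.float32)  # the "index"
_query = _np.random.randn(1, 128).astype(_np.float32)
_scores = _query @ _passages.T                 # inner products, shape (1, 1000)
_top10 = _np.argsort(-_scores[0])[:10]         # indices of the 10 best passages
print(_top10, _scores[0, _top10[0]])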
| 27 | 0 |
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split()
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
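# The script above filters `git diff` output down to .py files under the
# directories given on the command line. The same filter applied to a
# hard-coded file list, so it runs without a git checkout:
import re as _re

_modified = ["src/models/a.py", "docs/index.md", "tests/test_a.py"]
_joined = "|".join(["src", "tests"])
_pattern = _re.compile(rf"^({_joined}).*?\.py$")
print(" ".join(f for f in _modified if _pattern.match(f)))
# -> src/models/a.py tests/test_a.py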
| 175 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : List[Any] = torch.device('cpu')
def prepare_img():
    url : int = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im : Tuple = Image.open(requests.get(url , stream=True ).raw )
return im
def get_expected_output(swiftformer_name : Union[str, Any] ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def rename_key(dct : Tuple , old : Tuple , new : Union[str, Any] ):
    val : int = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict : str ):
    rename_keys : Dict = []
    for k in state_dict.keys():
        k_new : List[Any] = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name : str , pytorch_dump_folder_path : Union[str, Any] , original_ckpt : Union[str, Any] ):
__a : Union[str, Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__a : List[str] = 1_000
__a : Tuple = 'huggingface/label-files'
__a : str = 'imagenet-1k-id2label.json'
__a : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__a : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a : Any = idalabel
__a : str = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__a : Dict = [3, 3, 6, 4]
__a : int = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__a : Dict = [3, 3, 9, 6]
__a : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__a : Dict = [4, 3, 10, 5]
__a : Optional[int] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__a : Tuple = [4, 4, 12, 6]
__a : Dict = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
__a : List[Any] = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='cpu' , check_hash=_SCREAMING_SNAKE_CASE )
else:
__a : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
__a : Optional[Any] = checkpoint
    rename_keys : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
__a : Tuple = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
__a : Tuple = prepare_img()
__a : str = ViTImageProcessor.from_pretrained('preprocessor_config' )
__a : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# compare outputs from both models
__a : List[Any] = get_expected_output(_SCREAMING_SNAKE_CASE )
__a : Dict = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
__lowercase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
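# Checkpoint conversion above boils down to popping each old key out of the
# state dict and reinserting its tensor under the new name. A dict-only
# sketch of that rename step on toy keys; the example keys and the
# simplified rename rule are illustrative, not the full mapping:
_state = {"network.0.pwconv.weight": 1, "patch_embed.proj.weight": 2}

def _rename(key: str) -> str:
    key = key.replace(".pwconv", ".point_wise_conv")
    if "patch_embed" in key:
        key = key.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
    return key

_converted = {_rename(k): v for k, v in _state.items()}
print(sorted(_converted))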
| 27 | 0 |
'''simple docstring'''
def dfs(u : str , graph : Any , visited_edge : str , path : Optional[int]=None ) -> List[Any]:
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path )
return path
def check_circuit_or_path(graph : str , max_node : int ) -> Dict:
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def check_euler(graph : Tuple , max_node : List[str] ) -> Optional[Any]:
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check , odd_node = check_circuit_or_path(graph , max_node )
    if check == 3:
        print("""graph is not Eulerian""" )
        print("""no path""" )
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("""graph has a Euler path""" )
    if check == 1:
        print("""graph has a Euler cycle""" )
    path = dfs(start_node , graph , visited_edge )
    print(path )
def main() -> Optional[int]:
    G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    G5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(G1 , max_node )
    check_euler(G2 , max_node )
    check_euler(G3 , max_node )
    check_euler(G4 , max_node )
    check_euler(G5 , max_node )
if __name__ == "__main__":
main()
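# The Euler-path test above reduces to counting odd-degree vertices:
# 0 odd vertices means an Euler circuit exists, exactly 2 means an Euler
# path, anything else means neither. A standalone sketch of that count on
# the first sample graph:
_graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
_odd = [v for v, nbrs in _graph.items() if len(nbrs) % 2 == 1]
print(len(_odd), _odd)  # 2 odd vertices -> Euler path exists (endpoints 1 and 5)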
| 239 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig( PretrainedConfig ):
A_ = "umt5"
A_ = ["past_key_values"]
    def __init__( self , vocab_size=25_0112 , d_model=512 , d_kv=64 , d_ff=1024 , num_layers=8 , num_decoder_layers=None , num_heads=6 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="gated-gelu" , is_encoder_decoder=True , use_cache=True , tokenizer_class="T5Tokenizer" , tie_word_embeddings=True , pad_token_id=0 , eos_token_id=1 , decoder_start_token_id=0 , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            is_encoder_decoder=is_encoder_decoder , tokenizer_class=tokenizer_class , tie_word_embeddings=tie_word_embeddings , pad_token_id=pad_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('-' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'' )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
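# Minimal usage sketch, added for illustration (it assumes this file mirrors
# the UMT5 config shipped with the `transformers` package):
if __name__ == "__main__":
    config = UMT5Config(feed_forward_proj="gated-gelu")
    print(config.hidden_size)   # property alias for d_model -> 512
    print(config.dense_act_fn)  # "gated-gelu" is mapped to "gelu_new" in __init__
    print(config.is_gated_act)  # True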
"""simple docstring"""
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a `Protein` instance."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add PDB headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the amino-acid sequence alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a protein from a model prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
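# Illustrative round trip, added alongside the module (not part of the
# original): build a one-residue Protein with the ideal alanine atom mask and
# render it. The coordinates are made up; residue_constants (imported above)
# provides the atom bookkeeping.
if __name__ == "__main__":
    n_atom = residue_constants.atom_type_num  # 37 heavy-atom slots per residue
    demo = Protein(
        atom_positions=np.zeros((1, n_atom, 3), dtype=np.float32),
        aatype=np.array([0]),  # 0 == alanine in restype_order
        atom_mask=residue_constants.STANDARD_ATOM_MASK[np.array([0])],
        residue_index=np.array([1]),
        b_factors=np.zeros((1, n_atom), dtype=np.float32),
    )
    # line 0 is the "PARENT N/A" header; line 1 is the first ATOM record
    print(to_pdb(demo).splitlines()[1])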
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus"):
    """Scrape the key worldwide COVID-19 counters from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
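# Offline illustration of the scraping logic above, added for clarity; it
# uses a tiny hand-written HTML fragment instead of the live page, and the
# numbers are made up:
def _demo_offline_scrape() -> None:
    sample_html = (
        '<h1>Coronavirus Cases:</h1><div class="maincounter-number">123</div>'
        '<h1>Deaths:</h1><div class="maincounter-number">45</div>'
    )
    doc = BeautifulSoup(sample_html, "html.parser")
    pairs = zip(doc.findAll("h1"), doc.findAll("div", {"class": "maincounter-number"}))
    assert {k.text.strip(): v.text.strip() for k, v in pairs} == {
        "Coronavirus Cases:": "123",
        "Deaths:": "45",
    }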
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Where to store the evaluation results."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )
@dataclass
class TokenizerTrainingArguments:
    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class InitializationArguments:
    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved model to the hub."})
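# Sketch of how these dataclasses are typically consumed, added for
# illustration (assumes the `transformers` package is installed;
# HfArgumentParser maps CLI flags onto the fields above):
def _demo_parse_args() -> None:
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    (args,) = parser.parse_args_into_dataclasses(["--train_batch_size", "4"])
    assert args.train_batch_size == 4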
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))


class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    @unittest.skip(reason='Cvt does not output attentions')
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason='Cvt does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='Cvt does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices('GPU')) == 0,
        reason='TF does not support backprop for grouped convolutions on CPU.',
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8')
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy('mixed_float16')
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy('float32')

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
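# Standalone sketch of the integration check above, added for illustration.
# It assumes transformers and PIL are installed and that the first
# archive-list entry is the "microsoft/cvt-13" checkpoint; "cat.png" is a
# placeholder path, not a file from the source.
def _demo_cvt_inference() -> None:
    from PIL import Image
    from transformers import AutoImageProcessor, TFCvtForImageClassification

    processor = AutoImageProcessor.from_pretrained('microsoft/cvt-13')
    model = TFCvtForImageClassification.from_pretrained('microsoft/cvt-13')
    inputs = processor(images=Image.open('cat.png'), return_tensors='tf')
    logits = model(**inputs).logits  # shape (1, 1000): ImageNet classes
    print(model.config.id2label[int(logits.numpy().argmax())])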
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []

                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, token_labels) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, _) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
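# Side note, added for illustration: the bbox-legality loop in
# prepare_config_and_inputs above can be written without explicit swaps by
# sorting the x and y coordinate pairs. An equivalent numpy-only sketch:
def _demo_legal_bboxes() -> None:
    boxes = np.random.randint(0, 1000, size=(2, 7, 4))
    xs = np.sort(boxes[..., [0, 2]], axis=-1)  # enforce x0 <= x1
    ys = np.sort(boxes[..., [1, 3]], axis=-1)  # enforce y0 <= y1
    legal = np.stack([xs[..., 0], ys[..., 0], xs[..., 1], ys[..., 1]], axis=-1)
    assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()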
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        elif name.split('.')[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        words = [line.split(' ')[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
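# Added illustration of create_vocab_dict on a throwaway fairseq-style dict
# file, where each line is "<token> <count>" (the token names are made up):
def _demo_create_vocab_dict() -> None:
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('a 100\nb 50\n')
    vocab = create_vocab_dict(f.name)
    assert vocab == {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, 'a': 4, 'b': 5}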
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16_000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove('embed_out')
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""")
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, 'vocab.json'), 'w') as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, 'vocab.json'))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_02_24, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.dict_path,
        encoder_config_path=args.encoder_config_path,
        decoder_config_path=args.decoder_config_path,
        vocab_size=args.vocab_size,
        num_decoder_layers=args.num_decoder_layers,
    )
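# After a successful conversion, the dump folder can be reloaded like any
# other checkpoint. Added sketch (the path is a placeholder, not from the
# source):
def _demo_reload(dump_dir: str = './wav2vec2-s2t2-dump') -> None:
    model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
    tokenizer = Speech2Text2Tokenizer.from_pretrained(dump_dir)
    print(type(model).__name__, type(tokenizer).__name__)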
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """Convert a plaintext message to Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Convert a Morse-code message back to plaintext."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
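# Quick self-check of the helpers above, added for illustration ("SOS" is the
# classic example, and messages round-trip exactly):
def _demo_morse() -> None:
    assert encrypt("SOS") == "... --- ..."
    assert decrypt("... --- ...") == "SOS"
    assert decrypt(encrypt("HELLO WORLD")) == "HELLO WORLD"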
'''simple docstring'''
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, 'r') as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
__lowercase : Dict = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
__lowercase : Tuple = torch.device('cuda', local_rank)
__lowercase : Optional[int] = socket.gethostname()
__lowercase : List[str] = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
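# For reference (an illustrative note, not part of the original script): on a
# healthy run with two processes, the output looks roughly like
#
#   [node1-0] is OK (global rank: 0/2)
#   [node1-1] is OK (global rank: 1/2)
#   pt=2.0.1, cuda=11.8, nccl=(2, 14, 3)
#
# with hostnames and versions matching your local setup.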
| 27 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})

        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})

        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact.

    The artifact URL can't be downloaded from directly: GitHub answers with a
    redirect, so we fetch it with `allow_redirects=False` and follow the
    `Location` header manually.
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f'{artifact_name}.zip')
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ' problem.'
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))

    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None

    return test
def reduce_by_model(logs, error_filter=None):
    """Count each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f'| {count} | {error[:100]} | |'
        lines.append(line)

    return '\n'.join(lines)
def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)

    return '\n'.join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    parser.add_argument(
        '--output_dir',
        type=str,
        required=True,
        help='Where to store the downloaded artifacts and other result files.',
    )
    parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if ' / ' in k:
                index = k.find(' / ')
                k = k[index + len(' / ') :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
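# Example invocation (illustrative; the workflow run id below is made up, and
# the filename assumes this script is saved as `get_ci_error_statistics.py`):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_reports \
#       --token "$GITHUB_TOKEN"
#
# This writes job_links.json, artifacts.json, errors.json and the two markdown
# tables (reduced_by_error.txt, reduced_by_model.txt) into ci_reports/.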
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_data2vec_audio'] = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
    _import_structure['modeling_data2vec_text'] = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
    _import_structure['modeling_data2vec_vision'] = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
    _import_structure['modeling_tf_data2vec_vision'] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """
    >>> compare_string('0010', '0110')
    '0_10'

    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """
    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == '$':
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """
    >>> decimal_to_binary(3, [1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """
    >>> is_for_table('__1', '011', 2)
    True

    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """
    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # a column covered by exactly one row marks that row as essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # take the essential implicants and zero out every column they cover
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily pick the implicant covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """
    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
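# Illustrative example (an addition, not part of the original module): with
# integer minterms, decimal_to_binary pads each value to the requested width,
# most significant bit first.
#
#   >>> decimal_to_binary(3, [1, 5])
#   ['001', '101']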
| 188 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob('./metrics/*/')]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning')
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning')
    def test_load_metric(self, metric_name):
        _ = '[...]'  # unused placeholder kept from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        _ = '[...]'  # unused placeholder kept from the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics', metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join('metrics', metric_name), *args, **kwargs)

        with patch('datasets.load_metric') as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('bleurt')
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string('sv', '', '')  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict['input_ids']) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch('bleurt.score._create_predictor') as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher('bertscore')
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch('bert_score.scorer.get_model'), patch(
        'bert_score.scorer.bert_cos_score_idf'
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher('comet')
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    with patch('comet.download_model') as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load the downloaded model
        with patch('comet.load_from_checkpoint') as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join('metrics', 'seqeval'))
    wrong_scheme = 'ERROR'
    error_message = f'Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 27 | 0 |
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
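# Illustrative cross-check (an addition, not part of the original script): the
# backtracking above prints exactly n! orderings for n distinct elements, the
# same count the standard library produces.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! orderings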
| 285 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {'exact_match': np.mean(score_list) * 100}
| 27 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be filled with red
    blocks at least three units long, any two separated by at least one black
    square (Project Euler problem 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            # one more way: a block of this length ends flush with the row,
            # with every square before it black (no separator needed)
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
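# Sanity check (an addition, not part of the original solution): Project Euler
# problem 114 states that a row of length seven admits exactly seventeen
# fillings, which pins down both the recurrence and the flush-block term above.
assert solution(7) == 17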
| 28 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )['hidden_states'][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )['hidden_states'][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ['absolute', 'relative_key', 'relative_key_query']:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test')
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([('linear',), ('dynamic',)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == 'dynamic':
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 28 |
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """Sorts the list in place, using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
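# Illustrative checks (an addition, not part of the original script): the sort
# returns the same list object, now ordered; duplicates and the empty list are
# handled.
assert binary_insertion_sort([0, 4, 1234, 4, 1]) == [0, 1, 4, 4, 1234]
assert binary_insertion_sort([]) == []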
| 28 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    ' UN Chief Says There Is No Military Solution in Syria',
                    ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
                    ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
                    ' will only worsen the violence and misery for millions of people.',
                ]
                tgt_text = [
                    'Şeful ONU declară că nu există o soluţie militară în Siria',
                    'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors='pt',
                        src_lang='eng_Latn',
                        tgt_lang='ron_Latn',
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors='pt'
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors='pt'
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn('decoder_input_ids', batch_encoder_only)
    @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.')
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken('<special>', lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode('Hey this is a <special> token')
                special_token_id = tokenizer_r.encode('<special>', add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode('Hey this is a <special> token')
                    cr_output = tokenizer_cr.encode('Hey this is a <special> token')
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/nllb-200-distilled-600M'
    src_text = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
    tgt_text = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
    expected_src_tokens = [
256_047,
16_297,
134_408,
8_165,
248_066,
14_734,
950,
1_135,
105_721,
3_573,
83,
27_352,
108,
49_486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='eng_Latn', tgt_lang='ron_Latn'
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ['this is gunna be a long sentence ' * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
UpperCamelCase = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 1_5) , batch.input_ids.shape )
self.assertEqual((2, 1_5) , batch.attention_mask.shape )
UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors='pt' )
UpperCamelCase = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=1_0 , return_tensors='pt' )
UpperCamelCase = targets['input_ids']
UpperCamelCase = shift_tokens_right(
UpperCamelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(UpperCamelCase__ ) , {
                # eng_Latn, A, test, EOS
'input_ids': [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
'attention_mask': [[1, 1, 1, 1]],
                # fra_Latn
'forced_bos_token_id': 2_5_6_0_5_7,
} , )
@require_torch
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] )
UpperCamelCase = False
UpperCamelCase = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
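        # Note: the two assertions above illustrate the behaviour difference: with
        # legacy behaviour the source language code (2_5_6_0_4_7, eng_Latn) is
        # appended after EOS (2), while the non-legacy default prefixes it before the text.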
| 28 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase = []
for i in range(A__ ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
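# Note: the "cosine" schedule above follows Nichol & Dhariwal (2021): with
# alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, each beta is
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta.
# A minimal usage sketch (illustrative only, using the obfuscated name above):
#   betas = __lowerCamelCase(1_0_0_0)  # 1000-step cosine schedule
#   alphas = 1.0 - betas
#   alphas_cumprod = torch.cumprod(alphas, dim=0)  # as done in the scheduler below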
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ )
UpperCamelCase = 1.0 - self.betas
UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase = 1.0
# setable values
UpperCamelCase = None
UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() )
UpperCamelCase = variance_type
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ):
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
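        # i.e. variance = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t, the DDPM
        # posterior variance (beta-tilde_t) from Eq. (7) of Ho et al. (2020)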
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) )
UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase = variance.log()
UpperCamelCase = beta.log()
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
UpperCamelCase = self.alphas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = torch.clamp(
UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
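        # explicitly: mu_t = sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t) * x_0
        #                  + sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * x_t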
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device )
UpperCamelCase = self._get_variance(
UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase = variance
elif self.variance_type == "learned_range":
UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
UpperCamelCase = variance * variance_noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ):
"""simple docstring"""
UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
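        # forward diffusion q(x_t | x_0): x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
        # (Eq. (4) of Ho et al., 2020)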
return noisy_samples
| 28 | 1 |
'''simple docstring'''
from typing import Any
import numpy as np
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
return np.array_equal(A__ , matrix.conjugate().T )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = v.conjugate().T
UpperCamelCase = v_star.dot(A__ )
assert isinstance(A__ , np.ndarray )
return (v_star_dot.dot(A__ )) / (v_star.dot(A__ ))
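# For a Hermitian matrix M and a nonzero vector v, the Rayleigh quotient
# R(M, v) = (v* . M . v) / (v* . v) is always real and lies between the
# smallest and largest eigenvalues of M.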
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
UpperCamelCase = np.array([[2, 2 + 1J, 4], [2 - 1J, 3, 1J], [4, -1J, 1]] )
UpperCamelCase = np.array([[1], [2], [3]] )
assert is_hermitian(A__ ), F"""{a} is not hermitian."""
print(rayleigh_quotient(A__ , A__ ) )
UpperCamelCase = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] )
assert is_hermitian(A__ ), F"""{a} is not hermitian."""
assert rayleigh_quotient(A__ , A__ ) == float(3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ConvNextModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = ConvNextConfig
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
| 28 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Dict=9_9 , UpperCamelCase__ : Dict=2_4 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=6 , UpperCamelCase__ : Optional[Any]=3_7 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Any=5_1_2 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Optional[Any]=0.0_2 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[str]=1_0_0_0 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = scope
UpperCamelCase = range_bbox
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase = bbox[i, j, 3]
UpperCamelCase = bbox[i, j, 1]
UpperCamelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase = bbox[i, j, 2]
UpperCamelCase = bbox[i, j, 0]
UpperCamelCase = t
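        # after the swaps above, every box satisfies x0 <= x1 and y0 <= y1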
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def A ( self : Dict ):
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def A ( self : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , ):
"""simple docstring"""
UpperCamelCase = LiltModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ , bbox=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ , bbox=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = LiltForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Dict , ):
"""simple docstring"""
UpperCamelCase = LiltForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , bbox=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {
'input_ids': input_ids,
'bbox': bbox,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return True
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = LiltModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
@slow
def A ( self : Union[str, Any] ):
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = LiltModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor([[1, 2]] , device=UpperCamelCase__ )
UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(input_ids=UpperCamelCase__ , bbox=UpperCamelCase__ )
UpperCamelCase = torch.Size([1, 2, 7_6_8] )
UpperCamelCase = torch.tensor(
[[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=UpperCamelCase__ , )
        self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCamelCase__ , atol=1E-3 ) )
| 28 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
UpperCamelCase = value
else:
UpperCamelCase = value
return new_state_dict
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase = in_proj_weight_cross_attn[:256, :]
UpperCamelCase = in_proj_bias_cross_attn[:256]
UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase = in_proj_bias_cross_attn[256:512]
UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase = in_proj_bias_cross_attn[-256:]
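# Note: the slicing above assumes a hidden size of 256, so each fused
# in-projection of shape (768, 256) splits into query ([:256]), key ([256:512])
# and value ([-256:]) blocks, matching PyTorch's nn.MultiheadAttention layout.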
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase = max(A__ , A__ )
UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000
UpperCamelCase = target_max_size / current_max_size
UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
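# The resize above preserves aspect ratio: the longer image side is scaled to
# 800 px for detection checkpoints and 1_000 px for structure-recognition ones.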
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = F.to_tensor(A__ )
UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
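# The mean/std above are the standard ImageNet normalization statistics.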
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
UpperCamelCase = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
# create HuggingFace model and load state dict
UpperCamelCase = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = {0: 'table', 1: 'table rotated'}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase = 125
UpperCamelCase = 6
UpperCamelCase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 )
UpperCamelCase = TableTransformerForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
# verify our conversion
UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ )
UpperCamelCase = Image.open(A__ ).convert('RGB' )
UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 )
UpperCamelCase = model(A__ )
if "detection" in checkpoint_url:
UpperCamelCase = (1, 15, 3)
UpperCamelCase = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
UpperCamelCase = (1, 125, 7)
UpperCamelCase = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
UpperCamelCase = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(A__ )
image_processor.push_to_hub(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
UpperCamelCase = Vector()
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCamelCase__ ) , '(0,0,0,0,0,1)' )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCamelCase__ ) , 4 )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2] )
UpperCamelCase = Vector([1, 2, 3, 4, 5] )
UpperCamelCase = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
UpperCamelCase = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([2, -1, 4] ) # for test of dot product
UpperCamelCase = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
self.assertEqual((a * b) , 0 )
def A ( self : Dict ):
"""simple docstring"""
self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = Vector([1, 2, 3] )
UpperCamelCase = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCamelCase__ , UpperCamelCase__ ) ) , '(3,4,7)' )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 0, 0, 0, 0, 0] )
UpperCamelCase = x.copy()
self.assertEqual(str(UpperCamelCase__ ) , str(UpperCamelCase__ ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCamelCase__ ) , '(0,1,0)' )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(UpperCamelCase__ ) )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCamelCase__ , UpperCamelCase__ ) )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCamelCase__ , UpperCamelCase__ ) )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
UpperCamelCase = Vector([1, 2, 3] )
self.assertEqual('(14,32,50)' , str(a * x ) )
self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(UpperCamelCase__ ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
UpperCamelCase = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )
def A ( self : List[str] ):
"""simple docstring"""
self.assertEqual(
'|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 28 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase = {}
if frame_sampling_rate is not None:
UpperCamelCase = frame_sampling_rate
if num_frames is not None:
UpperCamelCase = num_frames
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content )
UpperCamelCase = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
UpperCamelCase = 0
UpperCamelCase = num_frames * frame_sampling_rate - 1
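        # sample `num_frames` evenly spaced frame indices between 0 and
        # num_frames * frame_sampling_rate - 1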
UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa )
UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 28 | 1 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : str = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OWL-ViT text encoder."""

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OWL-ViT vision encoder."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    """Composite configuration holding the text and vision sub-configs."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        """Instantiate an OwlViTConfig from separate text and vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self):
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ):
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
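# Minimal usage sketch for the configs above (hedged: assumes these classes are
# exported as in upstream `transformers`; values are illustrative only):
#
#     text_config = OwlViTTextConfig(hidden_size=512, num_hidden_layers=12)
#     vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
#     assert config.text_config.hidden_size == 512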
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """Generates a random hand and the expected result against another random hand."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Generates a random number of hands."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
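
# Sanity sketch of the index trick in generate_random_hand(): SORTED_HANDS is
# ordered weakest-to-strongest, so (play >= oppo) + (play > oppo) yields
# 0 -> "Loss", 1 -> "Tie", 2 -> "Win".
for _play, _oppo, _want in [(0, 5, "Loss"), (3, 3, "Tie"), (7, 2, "Win")]:
    assert ["Loss", "Tie", "Win"][(_play >= _oppo) + (_play > _oppo)] == _want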
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n            NOTE: Perplexity can only be calculated for causal language models.\n                    This includes models such as gpt2, causal variations of bert,\n                    causal versions of t5, and more (the full list can be found\n                    in the AutoModelForCausalLM documentation here:\n                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, cuda, or gpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss is natural-log based, so per-sequence perplexity is
            # exp of the mask-weighted mean negative log-likelihood.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Stochastic sampling from Karras et al. ("Elucidating the Design Space of
    Diffusion-Based Generative Models") tailored to variance-expanding models.
    """

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # Interchangeability hook for schedulers that need to scale the model input.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        # Explicit Langevin-like "churn" step: move the sample to a higher noise
        # level sigma_hat = sigma + gamma * sigma.
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # First-order (Euler) step of the ODE solver.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # Second-order correction using the model output evaluated at sigma_prev.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
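
# Hedged sketch of how a pipeline drives this scheduler (patterned on diffusers'
# KarrasVePipeline; `unet` is a hypothetical noise-prediction model and details
# may differ from the real pipeline):
#
#     scheduler.set_timesteps(num_inference_steps)
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = unet(sample_hat, sigma_hat).sample
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample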
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
# fmt: off
UpperCamelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
UpperCamelCase = {
'do_resize': True,
'size': {'height': 1_8, 'width': 1_8},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
UpperCamelCase = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : str , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def A ( self : Union[str, Any] , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCamelCase = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
UpperCamelCase = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(UpperCamelCase__ , return_tensors='np' )
UpperCamelCase = processor(images=UpperCamelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = processor(text=UpperCamelCase__ )
UpperCamelCase = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(UpperCamelCase__ ):
processor()
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase = 'lower newer'
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
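
# What these tests pin down, summarized (hedged note, not part of the suite):
# VisionTextDualEncoderProcessor routes `text=` to the tokenizer and `images=`
# to the image processor and merges the two encodings, conceptually
#     {**tokenizer(text), **image_processor(images)},
# which is why the combined call exposes input_ids, token_type_ids,
# attention_mask, and pixel_values together.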
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
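
# What the _LazyModule indirection buys (hedged illustration): importing the
# package stays cheap because heavy submodules load only on attribute access:
#
#     import transformers.models.ibert as ibert  # fast, nothing heavy imported yet
#     model_cls = ibert.IBertModel               # triggers the real modeling import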
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])

        hidden_states = layer_outputs[0]

        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
"""simple docstring"""
def __init__( self : Any , UpperCamelCase__ : str ):
"""simple docstring"""
super().__init__(UpperCamelCase__ )
UpperCamelCase = BertEncoderWithPabee(UpperCamelCase__ )
self.init_weights()
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
def A ( self : List[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = threshold
def A ( self : Dict , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = patience
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = 0
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.inference_layers_num / self.inference_instances_num
UpperCamelCase = (
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCamelCase__ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_dropout=None,
        output_layers=None,
        regression=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = embedding_output

        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output,
                attention_mask=extended_attention_mask,
                head_mask=head_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_extended_attention_mask,
            )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask
                )

                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0

                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
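
# Minimal sketch of the patience rule implemented in BertModelWithPabee.forward:
# inference stops once the per-layer prediction has been stable for `patience`
# consecutive layers (classification branch shown).
def _pabee_early_exit_sketch(per_layer_preds, patience):
    patient_counter, patient_result, depth = 0, None, 0
    for depth, pred in enumerate(per_layer_preds, start=1):
        if patient_result is not None and pred == patient_result:
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = pred
        if patient_counter == patience:
            break
    return depth, patient_result


assert _pabee_early_exit_sketch(["A", "B", "B", "B", "B"], patience=2) == (4, "B")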
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Returns the sum of the perimeters (up to max_perimeter) of the
    almost-equilateral Heronian triangles: isosceles triangles with sides
    a, a, a +/- 1 and integral area (Project Euler problem 94).
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
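
# Spot checks (hedged): the first almost-equilateral Heronian triangles are
# (5,5,6), (17,17,16), (65,65,66), (241,241,240), with perimeters 16, 50, 196, 722,
# so the cumulative sums produced by the recurrence above should be:
assert solution(16) == 16
assert solution(50) == 16 + 50
assert solution(196) == 16 + 50 + 196
assert solution(722) == 16 + 50 + 196 + 722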
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    Generic image processor class, instantiated via the
    [`AutoImageProcessor.from_pretrained`] class method. It cannot be
    instantiated directly with `__init__()` (that raises an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
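
# Typical entry point for the class above (hedged, mirrors the transformers docs):
#
#     from transformers import AutoImageProcessor
#     image_processor = AutoImageProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = image_processor(images=image, return_tensors="pt")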
'''simple docstring'''
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
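
# Complexity note: the triple loop above is Theta(n^3) time and Theta(n^2) space.
# Independent sanity sketch on a 3-node path graph (0 -> 1 -> 2, unit weights):
_g = Graph(3)
_g.add_edge(0, 1, 1)
_g.add_edge(1, 2, 1)
_g.floyd_warshall()
assert _g.show_min(0, 2) == 2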
'''simple docstring'''
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). CANINE uses raw
    Unicode codepoints as token ids, so no vocabulary file is required.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
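
# The character-level scheme in one line (sketch): ordinary tokens map to
# ord(char), while special tokens live in Unicode's Private Use Area.
assert ord("h") == 104
assert SPECIAL_CODEPOINTS[0xE000] == "[CLS]"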
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
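
# Hedged sketch of the canonical training loop built from the exports above
# (names follow the accelerate docs; model/optimizer/dataloader are assumed given):
#
#     accelerator = Accelerator()
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         loss = model(**batch).loss
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()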
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encodes the given bytes into base16 (uppercase hexadecimal)."""
    # Turn the data into a list of integers (where each integer is a byte),
    # then render each byte as two uppercase hex digits and join the result.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decodes the given base16-encoded string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
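
# Round-trip check using the functions above:
assert base16_encode(b"Hello World!") == "48656C6C6F20576F726C6421"
assert base16_decode("48656C6C6F20576F726C6421") == b"Hello World!"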
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
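
# Note on the pattern above (illustrative comment, not original source): `_LazyModule`
# replaces this module in `sys.modules`, so heavy submodules are only imported when an
# attribute is first accessed. Under that assumption, a line like the following would
# trigger the actual `modeling_m2m_100` import on first use:
#
#     from transformers.models.m2m_100 import M2M100ForConditionalGeneration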
| 28 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a Karras VE scheduler step."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022), tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.Tensor] = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        """Interchangeability hook; this scheduler needs no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the discrete timesteps and the geometric sigma schedule used for sampling."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        """Explicit Langevin-like churn: raise sigma to sigma_hat by adding noise to the sample."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Propagate the sample one step along the ODE (first-order Euler part)."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order correction: average the Euler derivative with the corrected one."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
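
# Sketch of the intended sampling loop (illustrative, not part of the original file).
# It assumes a `unet(x, sigma)` callable returning the model output and an existing
# `scheduler = KarrasVeScheduler()`; both names are placeholders.
#
# scheduler.set_timesteps(50)
# x = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     sigma = scheduler.schedule[t]
#     sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#     x_hat, sigma_hat = scheduler.add_noise_to_input(x, sigma)
#     model_output = unet(x_hat, sigma_hat)
#     out = scheduler.step(model_output, sigma_hat, sigma_prev, x_hat)
#     if sigma_prev != 0:
#         model_output = unet(out.prev_sample, sigma_prev)
#         out = scheduler.step_correct(
#             model_output, sigma_hat, sigma_prev, x_hat, out.prev_sample, out.derivative
#         )
#     x = out.prev_sample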
| 28 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-wise cosine similarity between two batches of embeddings."""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None):
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params}, jnp.array(clip_input, dtype=jnp.float32), rngs={}
        )
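
# Illustrative usage sketch (not in the original file). The checkpoint id and the
# `from_pt` flag below are assumptions; images arrive as NCHW and are transposed to
# NHWC internally before being scored against the concept embeddings.
#
# checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#     "CompVis/stable-diffusion-safety-checker", from_pt=True
# )
# clip_input = jnp.zeros((1, 3, 224, 224))
# has_nsfw = checker(clip_input)  # boolean flag per image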
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[int] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        """Deprecated alias kept for backward compatibility; use ChineseCLIPImageProcessor instead."""
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
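
# Quick check of the deprecation path (illustrative; not in the original file).
# Instantiating the alias should emit a FutureWarning while still behaving like
# ChineseCLIPImageProcessor. The `simplefilter` setting here is an assumption.
#
# with warnings.catch_warnings(record=True) as caught:
#     warnings.simplefilter("always")
#     _ = ChineseCLIPFeatureExtractor()
#     assert any(issubclass(w.category, FutureWarning) for w in caught)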
| 28 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCamelCase : Optional[Any] = get_tests_dir("fixtures")
class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )
@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))
    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_FILE)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map, {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"}
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 28 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a pair of dummy DataLoaders over noisy samples of y = a*x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Run a quick training loop, returning the random draws made along the way."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
            if scheduler is not None:
                scheduler.step()
    return rands


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the two most recent survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing"
_lowerCamelCase : Union[str, Any] = DummyModel()
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase : Any = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_lowerCamelCase : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_lowerCamelCase : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 28 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key onto the Hugging Face Blenderbot naming scheme."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
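
# Illustrative check of the key mapping above (not in the original script); the
# sample key is hypothetical but follows the ParlAI naming the PATTERNS table targets.
#
# assert (
#     rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#     == "encoder.layers.0.self_attn.q_proj.weight"
# )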
def rename_layernorm_keys(sd):
    """Rename the four layernorm_embedding entries in-place to layer_norm."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_lowerCamelCase : Any = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy, rename, and load a ParlAI Blenderbot checkpoint into the Hugging Face layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 28 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 28 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_lowerCamelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_lowerCamelCase : Optional[int] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_lowerCamelCase : str = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
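
# Small usage sketch for the non-aggregated path (added for illustration): with
# `use_aggregator=False`, `_compute` returns one rouge_score Score per prediction /
# reference pair instead of bootstrap aggregates. The `rouge` handle below assumes
# `datasets.load_metric` is available.
#
# rouge = datasets.load_metric("rouge")
# per_pair = rouge.compute(
#     predictions=["hello there", "general kenobi"],
#     references=["hello there", "general kenobi"],
#     use_aggregator=False,
# )
# print(per_pair["rouge1"])  # list with one Score(precision, recall, fmeasure) per pair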
| 28 | 1 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of n, iteratively.

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of n, recursively.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of n, via str().

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on a few large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 28 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL Image by `level` (clamped to [-255.0, 255.0])."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
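
# Tiny self-contained check (illustrative addition, not in the original file):
# brightening a mid-gray pixel by +50 shifts every channel by exactly 50.
#
# probe = Image.new("RGB", (1, 1), (128, 128, 128))
# assert change_brightness(probe, 50).getpixel((0, 0)) == (178, 178, 178)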
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 28 | 1 |
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """Query the GitHub API and fail if any of the target runners is offline."""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
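
# For reference (illustrative, not in the original script): the GitHub runners
# endpoint parsed above returns roughly this shape, so `status["runners"]` is a
# list of dicts with at least "name" and "status" keys. The sample values are made up.
#
# status = {
#     "total_count": 2,
#     "runners": [
#         {"id": 1, "name": "gpu-runner-1", "status": "online"},
#         {"id": 2, "name": "gpu-runner-2", "status": "offline"},
#     ],
# }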
if __name__ == "__main__":
    def list_str(values):
        """Parse a comma-separated CLI value into a list."""
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 28 |
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 28 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_lowerCamelCase : List[str] = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T):
        """Look up x, evicting the least recently used key when the cache is full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self):
        """Print the cache contents from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self):
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 28 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm( self : int , config : Optional[int] , input_ids : List[Any] , token_type_ids : Any , input_mask : List[Any] , sequence_labels : List[str] , token_labels : Union[str, Any] , choice_labels : List[str] ):
        """simple docstring"""
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self : Any , config : Any , input_ids : Union[str, Any] , token_type_ids : List[Any] , input_mask : Any , sequence_labels : List[str] , token_labels : Tuple , choice_labels : Optional[Any] ):
        """simple docstring"""
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self : Optional[int] , config : Union[str, Any] , input_ids : Tuple , token_type_ids : Optional[int] , input_mask : List[Any] , sequence_labels : int , token_labels : Optional[Any] , choice_labels : Tuple ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self : Any , config : Any , input_ids : str , token_type_ids : List[Any] , input_mask : Optional[int] , sequence_labels : int , token_labels : int , choice_labels : Optional[Any] ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self : int , config : Optional[Any] , input_ids : Any , token_type_ids : Optional[int] , input_mask : Tuple , sequence_labels : Dict , token_labels : str , choice_labels : Dict ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : int ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self : str ):
        """simple docstring"""
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=3_7 )
    def test_config( self : str ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model( self : Optional[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self : str ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self : Optional[int] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self : List[Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self : Tuple ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self : Any ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self : Any ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self : List[Any] ):
        """simple docstring"""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs( self : List[str] ):
        """simple docstring"""
        return
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference_no_head( self : Optional[int] ):
        """simple docstring"""
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 2_5_6, 7_6_8) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm( self : List[Any] ):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 2_5_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self : List[Any] ):
        """simple docstring"""
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        input_ids = torch.arange(4_0_9_6 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_0_2_6_5
        expected_shape = torch.Size((1, 4_0_9_6, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
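# The three integration tests above pin a 3x3 slice of each checkpoint's output to
# hard-coded reference values: a cheap numerical fingerprint that catches regressions
# without storing full tensors.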
| 28 | 1 |
'''simple docstring'''
def multiplication_table( number , number_of_terms ) -> str:
    """simple docstring"""
    return "\n".join(
        F"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 28 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n                This includes models such as gpt2, causal variations of bert,\n                causal versions of t5, and more (the full list can be found\n                in the AutoModelForCausalLM documentation here:\n                https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
    def _compute( self : Optional[Any] , input_texts : Union[str, Any] , model_id : Optional[Any] , batch_size : int = 1_6 , add_start_token : bool = True , device : List[Any]=None ):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = 'cuda'
        else:
            device = 'cuda' if torch.cuda.is_available() else 'cpu'
        model = AutoModelForCausalLM.from_pretrained(model_id )
        model = model.to(device )
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts , add_special_tokens=False , padding=True , truncation=True , max_length=max_tokenized_len , return_tensors='pt' , return_attention_mask=True , ).to(device )
        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none' )
        for start_index in logging.tqdm(range(0 , len(encoded_texts ) , batch_size ) ):
            end_index = min(start_index + batch_size , len(encoded_texts ) )
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(device )
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(device ), attn_mask] , dim=1 )
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch , attention_mask=attn_mask ).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp2(
                (loss_fct(shift_logits.transpose(1 , 2 ) , shift_labels ) * shift_attention_mask_batch).sum(1 )
                / shift_attention_mask_batch.sum(1 ) )
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls )}
| 28 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n    title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n    author = \"Lin, Chin-Yew\",\n    booktitle = \"Text Summarization Branches Out\",\n    month = jul,\n    year = \"2004\",\n    address = \"Barcelona, Spain\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W04-1013\",\n    pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n        `\"rougeL\"`: Longest common subsequence based scoring.\n        `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric('rouge')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n    >>> print(results[\"rouge1\"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results[\"rouge1\"].mid.fmeasure)\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
    def _info( self : Union[str, Any] ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
    def _compute( self : Tuple , predictions : Tuple , references : Dict , rouge_types : List[str]=None , use_aggregator : List[Any]=True , use_stemmer : Optional[Any]=False ):
        """simple docstring"""
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types , use_stemmer=use_stemmer )
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references , predictions ):
            score = scorer.score(ref , pred )
            if use_aggregator:
                aggregator.add_scores(score )
            else:
                scores.append(score )
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
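# --- Illustrative sketch (an addition, mirroring one iteration of the loop above) ---
# "_rouge_sketch" is an example name; RougeScorer.score takes the reference
# (target) first, then the prediction, exactly as in _compute above.
def _rouge_sketch(prediction='hello there', reference='hello there'):
    scorer = rouge_scorer.RougeScorer(rouge_types=['rouge1', 'rougeL'], use_stemmer=True)
    return scorer.score(reference, prediction)  # dict of Score(precision, recall, fmeasure)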
| 28 |
'''simple docstring'''
def solution( length = 50 ) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
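# Hedged sanity check (an addition; "_brute_force_count" is a hypothetical helper):
# count the same fillings by enumerating all 2**length cell assignments, feasible
# only for small lengths. For length 7 this yields 17, the example given in the
# Project Euler 114 statement.
def _brute_force_count(length: int) -> int:
    from itertools import product
    total = 0
    for cells in product((0, 1), repeat=length):
        # a filling is valid when every maximal run of red cells (1s) has length >= 3
        runs = [len(run) for run in ''.join(map(str, cells)).split('0') if run]
        if all(run >= 3 for run in runs):
            total += 1
    return total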
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ) -> Optional[Any]:
    """simple docstring"""
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
)
_lowerCamelCase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 28 |
'''simple docstring'''
def binary_insertion_sort( collection ) -> list:
    """simple docstring"""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
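# Example (illustrative): binary_insertion_sort([5, 2, 4, 1]) returns [1, 2, 4, 5].
# The binary search finds each insertion index in O(log i) comparisons, but the
# element-shifting loop keeps the overall worst case at O(n^2).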
if __name__ == "__main__":
_lowerCamelCase : int = input("Enter numbers separated by a comma:\n").strip()
_lowerCamelCase : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28 | 1 |
'''simple docstring'''
from __future__ import annotations
def encode( plain ) -> list[int]:
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode( encoded ) -> str:
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main() -> None:
    """simple docstring"""
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
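# Example (illustrative): encode('abc') == [1, 2, 3] and decode([1, 2, 3]) == 'abc';
# the cipher maps lowercase letters to their 1-based alphabet positions via ord/chr.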
if __name__ == "__main__":
main()
| 28 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput ( BaseOutput ):
"""simple docstring"""
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> Tuple:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
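# Math note (illustrative): with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2,
# each beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta), so the
# cumulative product of the resulting alphas follows the chosen alpha_bar curve.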
class UnCLIPScheduler ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
    def __init__( self : List[str] , num_train_timesteps : int = 1_0_0_0 , variance_type : str = "fixed_small_log" , clip_sample : bool = True , clip_sample_range : Optional[float] = 1.0 , prediction_type : str = "epsilon" , beta_schedule : str = "squaredcos_cap_v2" , ):
        """simple docstring"""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self : Dict , sample : torch.FloatTensor , timestep : Optional[int] = None ):
        """simple docstring"""
        return sample
    def set_timesteps( self : List[str] , num_inference_steps : int , device : Union[str, torch.device] = None ):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self : Dict , t : Union[str, Any] , prev_timestep : Union[str, Any]=None , predicted_variance : Optional[int]=None , variance_type : Tuple=None ):
        """simple docstring"""
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1E-2_0 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self : int , model_output : torch.FloatTensor , timestep : int , sample : torch.FloatTensor , prev_timestep : Optional[int] = None , generator : str=None , return_dict : bool = True , ):
        """simple docstring"""
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output , predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ' for the UnCLIPScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ' for the UnCLIPScheduler.' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
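    # The add_noise method below samples the forward process
    # q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I), i.e.
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise (DDPM eq. 4).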
    def add_noise( self : int , original_samples : torch.FloatTensor , noise : torch.FloatTensor , timesteps : torch.IntTensor , ):
        """simple docstring"""
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 28 | 1 |
'''simple docstring'''
def __lowerCamelCase ( mass , velocity ) -> float:
    """simple docstring"""
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
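# Worked example (illustrative): a 10 kg body moving at 10 m/s carries
# 0.5 * 10 * 10 ** 2 = 500 J; abs(velocity) makes the result independent of the
# direction of motion.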
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
"""simple docstring"""
    def __init__( self : Optional[int] , parent : List[str] , batch_size : Any=1_3 , image_size : Optional[int]=3_2 , num_channels : Any=3 , num_stages : Tuple=4 , hidden_sizes : str=[1_0, 2_0, 3_0, 4_0] , depths : str=[2, 2, 3, 2] , is_training : Dict=True , use_labels : List[str]=True , intermediate_size : str=3_7 , hidden_act : Union[str, Any]="gelu" , num_labels : Dict=1_0 , initializer_range : Union[str, Any]=0.0_2 , out_features : int=["stage2", "stage3", "stage4"] , out_indices : List[str]=[2, 3, 4] , scope : Any=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self : Union[str, Any] ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : List[str] ):
        """simple docstring"""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self : Union[str, Any] , config : List[Any] , pixel_values : Any , labels : str ):
        """simple docstring"""
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
    def create_and_check_for_image_classification( self : List[str] , config : Union[str, Any] , pixel_values : Optional[Any] , labels : int ):
        """simple docstring"""
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self : Tuple , config : Optional[int] , pixel_values : Tuple , labels : str ):
        """simple docstring"""
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
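    # Shape note (illustrative): with image_size=32, the 4x4 stride-4 stem plus one
    # stride-2 downsampling per later stage put "stage2" (hidden_sizes[1]) at
    # 32 // 8 = 4, matching the 4x4 feature maps asserted above.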
    def prepare_config_and_inputs_for_common( self : Any ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self : Tuple ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self : List[str] ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self : Optional[int] ):
        """simple docstring"""
        return
    @unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds( self : List[str] ):
        """simple docstring"""
        pass
    @unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes( self : List[Any] ):
        """simple docstring"""
        pass
    @unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking( self : Optional[int] ):
        """simple docstring"""
        pass
    def test_forward_signature( self : Any ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self : Union[str, Any] ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_backbone( self : Tuple ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self : Optional[Any] ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict : Dict , config : Union[str, Any] , model_class : Tuple ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self : Dict ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self : Dict ):
        """simple docstring"""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ) -> Any:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor( self : Optional[Any] ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self : List[Any] ):
        """simple docstring"""
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
"""simple docstring"""
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
    def setUp( self : Tuple ):
        """simple docstring"""
        self.model_tester = ConvNextModelTester(self )
| 28 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput ( BaseOutput ):
"""simple docstring"""
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding ( nn.Module ):
"""simple docstring"""
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self : Optional[int] ):
        """simple docstring"""
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv1 )
            conv2 = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conv2 )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : int , conditioning : int ):
        """simple docstring"""
        embedding = self.conv_in(conditioning )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
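# Note (illustrative): the three stride-(2, 2) convs in this embedding downsample
# the conditioning image by 8x, which is why init_weights below builds it at
# sample_size * 8, so it lands at the latent resolution before being added to the
# UNet's conv_in output.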
@flax_register_to_config
class FlaxControlNetModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
"""simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights( self : int , rng : jax.random.KeyArray ):
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self : List[Any] ):
        """simple docstring"""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Optional[Any] , sample : List[Any] , timesteps : Union[str, Any] , encoder_hidden_states : Union[str, Any] , controlnet_cond : Union[str, Any] , conditioning_scale : float = 1.0 , return_dict : bool = True , train : bool = False , ):
        """simple docstring"""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlock2D ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
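# Design note: every controlnet_down_blocks / controlnet_mid_block conv above is
# built with zero-valued kernel and bias initializers, so an untrained ControlNet
# contributes exactly zero to the UNet residuals and fine-tuning starts from the
# unmodified base model's behavior.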
| 28 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
UpperCamelCase = value
else:
UpperCamelCase = value
return new_state_dict
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase = in_proj_weight_cross_attn[:256, :]
UpperCamelCase = in_proj_bias_cross_attn[:256]
UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase = in_proj_bias_cross_attn[256:512]
UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase = in_proj_bias_cross_attn[-256:]
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase = max(A__ , A__ )
UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000
UpperCamelCase = target_max_size / current_max_size
UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = F.to_tensor(A__ )
UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
UpperCamelCase = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
# create HuggingFace model and load state dict
UpperCamelCase = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = {0: 'table', 1: 'table rotated'}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase = 125
UpperCamelCase = 6
UpperCamelCase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 )
UpperCamelCase = TableTransformerForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
# verify our conversion
UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ )
UpperCamelCase = Image.open(A__ ).convert('RGB' )
UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 )
UpperCamelCase = model(A__ )
if "detection" in checkpoint_url:
UpperCamelCase = (1, 15, 3)
UpperCamelCase = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
UpperCamelCase = (1, 125, 7)
UpperCamelCase = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
UpperCamelCase = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(A__ )
image_processor.push_to_hub(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
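# --- A minimal, hedged sketch of the q/k/v split performed by read_in_q_k_v above. ---
# PyTorch's nn.MultiheadAttention stores the query/key/value projections as one fused
# in_proj_weight of shape (3 * hidden_size, hidden_size) plus a fused bias; HuggingFace
# models keep them separate, so the conversion slices the fused tensors into thirds.
# hidden_size=256 matches the Table Transformer checkpoints; all names below are
# illustrative and not part of the original script.
import torch

hidden_size = 256
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

q_w, k_w, v_w = in_proj_weight.chunk(3, dim=0)  # same slices as [:256], [256:512], [-256:]
q_b, k_b, v_b = in_proj_bias.chunk(3, dim=0)

assert torch.equal(q_w, in_proj_weight[:hidden_size, :])
assert torch.equal(v_b, in_proj_bias[-hidden_size:])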
| 28 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any]=1_3 , UpperCamelCase__ : Optional[int]=6_4 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Tuple=3_2 , UpperCamelCase__ : str=5 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : Optional[int]=3_7 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : Any=[1, 1_6, 4, 4] , UpperCamelCase__ : str=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = patch_size
UpperCamelCase = num_channels
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = scope
UpperCamelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCamelCase = (self.image_size // 3_2) ** 2
UpperCamelCase = num_patches + 1
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 1_6, 3_2],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase__ , )
def A ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = ViTHybridModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.type_sequence_label_size
UpperCamelCase = ViTHybridForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ViTHybridModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : int ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
UpperCamelCase = model_class(config=UpperCamelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCamelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def A ( self : Tuple ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = ViTHybridModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Tuple ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
@require_accelerate
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
UpperCamelCase = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' )
UpperCamelCase = model(**UpperCamelCase__ )
UpperCamelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCamelCase = logits.argmax(-1 ).item()
self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
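# A quick standalone check of the sequence-length arithmetic used by ViTHybridModelTester
# above: the BiT backbone has an output stride of 32, so an H x W image yields an
# (H // 32) x (W // 32) feature map, and the transformer sees one token per feature-map
# position plus the [CLS] token. Values mirror the tester defaults (image_size=64).
image_size = 64
num_patches = (image_size // 32) ** 2  # 2 x 2 feature map -> 4 tokens
seq_length = num_patches + 1           # +1 for the [CLS] token
assert seq_length == 5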
| 28 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase = {}
if frame_sampling_rate is not None:
UpperCamelCase = frame_sampling_rate
if num_frames is not None:
UpperCamelCase = num_frames
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content )
UpperCamelCase = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
UpperCamelCase = 0
UpperCamelCase = num_frames * frame_sampling_rate - 1
UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.int64 )
UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 28 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : str = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PegasusConfig
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = """gelu"""
def __init__( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=1_3 , UpperCamelCase__ : List[Any]=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=False , UpperCamelCase__ : str=9_9 , UpperCamelCase__ : Union[str, Any]=3_2 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : List[Any]=3_7 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Union[str, Any]=2_0 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : int=0 , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = eos_token_id
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCamelCase = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return config, inputs_dict
def A ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = 2_0
UpperCamelCase = model_class_name(UpperCamelCase__ )
UpperCamelCase = model.encode(inputs_dict['input_ids'] )
UpperCamelCase , UpperCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase__ , )
UpperCamelCase = model.decode(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def A ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = 2_0
UpperCamelCase = model_class_name(UpperCamelCase__ )
UpperCamelCase = model.encode(inputs_dict['input_ids'] )
UpperCamelCase , UpperCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase__ , decoder_position_ids=UpperCamelCase__ , )
UpperCamelCase = model.decode(UpperCamelCase__ , UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ )
UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __lowerCamelCase ( A__ , A__ , A__ , A__=None , A__=None , ) -> Optional[int]:
"""simple docstring"""
if attention_mask is None:
UpperCamelCase = np.not_equal(A__ , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
UpperCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
_SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = FlaxPegasusModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = model_class(UpperCamelCase__ )
@jax.jit
def encode_jitted(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]=None , **UpperCamelCase__ : Optional[int] ):
return model.encode(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
with self.subTest('JIT Enabled' ):
UpperCamelCase = encode_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase = encode_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCamelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ):
return model.decode(
decoder_input_ids=UpperCamelCase__ , decoder_attention_mask=UpperCamelCase__ , encoder_outputs=UpperCamelCase__ , )
with self.subTest('JIT Enabled' ):
UpperCamelCase = decode_jitted(**UpperCamelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCamelCase = decode_jitted(**UpperCamelCase__ ).to_tuple()
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A ( self : Any ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
UpperCamelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=UpperCamelCase__ )
UpperCamelCase = np.ones((1, 1) )
UpperCamelCase = model(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
UpperCamelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
UpperCamelCase = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
UpperCamelCase = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
UpperCamelCase = tokenizer(UpperCamelCase__ , return_tensors='np' , truncation=UpperCamelCase__ , max_length=5_1_2 , padding=UpperCamelCase__ )
UpperCamelCase = model.generate(**UpperCamelCase__ , num_beams=2 ).sequences
UpperCamelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
assert tgt_text == decoded
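# The cache tests above decode all but the last token in one call, then feed only the
# last token together with the populated cache, and compare against a single full
# decode. The decoder position ids for the first call are built by broadcasting a range
# over the batch dimension, as in this standalone sketch (jax.numpy only):
import jax.numpy as jnp

batch_size, seq_len = 2, 7
decoder_position_ids = jnp.broadcast_to(
    jnp.arange(seq_len - 1)[None, :], (batch_size, seq_len - 1)
)
assert decoder_position_ids.shape == (2, 6)
assert int(decoder_position_ids[0, -1]) == 5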
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
_lowerCamelCase : Optional[int] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_lowerCamelCase : Union[str, Any] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_lowerCamelCase : Optional[Any] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_lowerCamelCase : List[Any] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_lowerCamelCase : List[str] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) )
UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(A__ )
UpperCamelCase = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=True )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) )
UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' )
with open(A__ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ )
UpperCamelCase = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376
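# The expected outcome in generate_random_hand above relies on an index trick:
# (play >= oppo) + (play > oppo) evaluates to 0 for a loss, 1 for a tie and 2 for a win,
# because SORTED_HANDS is ordered from weakest to strongest. Standalone illustration:
def expected_result(play: int, oppo: int) -> str:
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]

assert expected_result(3, 7) == "Loss"
assert expected_result(5, 5) == "Tie"
assert expected_result(9, 2) == "Win"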
| 28 | 1 |
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""image_processor"""]
_SCREAMING_SNAKE_CASE = """SamImageProcessor"""
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(UpperCamelCase__ )
UpperCamelCase = self.image_processor
UpperCamelCase = -1_0
UpperCamelCase = self.image_processor.size['longest_edge']
def __call__( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Union[str, Any] , ):
"""simple docstring"""
UpperCamelCase = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# pop arguments that are not used in the foward but used nevertheless
UpperCamelCase = encoding_image_processor['original_sizes']
if hasattr(UpperCamelCase__ , 'numpy' ): # Checks if Torch or TF tensor
UpperCamelCase = original_sizes.numpy()
UpperCamelCase , UpperCamelCase , UpperCamelCase = self._check_and_preprocess_points(
input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , input_boxes=UpperCamelCase__ , )
UpperCamelCase = self._normalize_and_convert(
UpperCamelCase__ , UpperCamelCase__ , input_points=UpperCamelCase__ , input_labels=UpperCamelCase__ , input_boxes=UpperCamelCase__ , return_tensors=UpperCamelCase__ , )
return encoding_image_processor
def A ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Any="pt" , ):
"""simple docstring"""
if input_points is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
UpperCamelCase = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] ) for point in input_points
]
else:
UpperCamelCase = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ )
for point, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
UpperCamelCase , UpperCamelCase = self._pad_points_and_labels(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = np.array(UpperCamelCase__ )
if input_labels is not None:
UpperCamelCase = np.array(UpperCamelCase__ )
if input_boxes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
UpperCamelCase = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , original_sizes[0] , is_bounding_box=UpperCamelCase__ )
for box in input_boxes
]
else:
UpperCamelCase = [
self._normalize_coordinates(self.target_size , UpperCamelCase__ , UpperCamelCase__ , is_bounding_box=UpperCamelCase__ )
for box, original_size in zip(UpperCamelCase__ , UpperCamelCase__ )
]
UpperCamelCase = np.array(UpperCamelCase__ )
if input_boxes is not None:
if return_tensors == "pt":
UpperCamelCase = torch.from_numpy(UpperCamelCase__ )
# boxes batch size of 1 by default
UpperCamelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
UpperCamelCase = tf.convert_to_tensor(UpperCamelCase__ )
# boxes batch size of 1 by default
UpperCamelCase = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'input_boxes': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
UpperCamelCase = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
UpperCamelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
UpperCamelCase = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
UpperCamelCase = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'input_points': input_points} )
if input_labels is not None:
if return_tensors == "pt":
UpperCamelCase = torch.from_numpy(UpperCamelCase__ )
# point batch size of 1 by default
UpperCamelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
UpperCamelCase = tf.convert_to_tensor(UpperCamelCase__ )
# point batch size of 1 by default
UpperCamelCase = tf.expand_dims(UpperCamelCase__ , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'input_labels': input_labels} )
return encoding_image_processor
def A ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
UpperCamelCase = max([point.shape[0] for point in input_points] )
UpperCamelCase = []
for i, point in enumerate(UpperCamelCase__ ):
if point.shape[0] != expected_nb_points:
UpperCamelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
UpperCamelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(UpperCamelCase__ )
UpperCamelCase = processed_input_points
return input_points, input_labels
def A ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=False ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = original_size
UpperCamelCase , UpperCamelCase = self.image_processor._get_preprocess_shape(UpperCamelCase__ , longest_edge=UpperCamelCase__ )
UpperCamelCase = deepcopy(UpperCamelCase__ ).astype(UpperCamelCase__ )
if is_bounding_box:
UpperCamelCase = coords.reshape(-1 , 2 , 2 )
UpperCamelCase = coords[..., 0] * (new_w / old_w)
UpperCamelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
UpperCamelCase = coords.reshape(-1 , 4 )
return coords
def A ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[int]=None , ):
"""simple docstring"""
if input_points is not None:
if hasattr(UpperCamelCase__ , 'numpy' ): # Checks for TF or Torch tensor
UpperCamelCase = input_points.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_points[0] , UpperCamelCase__ ):
raise ValueError('Input points must be a list of list of floating points.' )
UpperCamelCase = [np.array(UpperCamelCase__ ) for input_point in input_points]
else:
UpperCamelCase = None
if input_labels is not None:
if hasattr(UpperCamelCase__ , 'numpy' ):
UpperCamelCase = input_labels.numpy().tolist()
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not isinstance(input_labels[0] , UpperCamelCase__ ):
raise ValueError('Input labels must be a list of list integers.' )
UpperCamelCase = [np.array(UpperCamelCase__ ) for label in input_labels]
else:
UpperCamelCase = None
if input_boxes is not None:
if hasattr(UpperCamelCase__ , 'numpy' ):
UpperCamelCase = input_boxes.numpy().tolist()
if (
not isinstance(UpperCamelCase__ , UpperCamelCase__ )
or not isinstance(input_boxes[0] , UpperCamelCase__ )
or not isinstance(input_boxes[0][0] , UpperCamelCase__ )
):
raise ValueError('Input boxes must be a list of list of list of floating points.' )
UpperCamelCase = [np.array(UpperCamelCase__ ).astype(np.float32 ) for box in input_boxes]
else:
UpperCamelCase = None
return input_points, input_labels, input_boxes
@property
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(UpperCamelCase__ ) )
def A ( self : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
return self.image_processor.post_process_masks(*UpperCamelCase__ , **UpperCamelCase__ )
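# Hedged sketch of the coordinate rescaling done by _normalize_coordinates above: SAM
# resizes every image so its longest edge becomes target_size (1024 for the released
# checkpoints), and prompt coordinates must be scaled by the same factors. The rounding
# below approximates what _get_preprocess_shape is assumed to do (scale, round to int).
def rescale_point(point, original_size, longest_edge=1024):
    old_h, old_w = original_size
    scale = longest_edge / max(old_h, old_w)
    new_h, new_w = int(old_h * scale + 0.5), int(old_w * scale + 0.5)
    x, y = point
    return x * (new_w / old_w), y * (new_h / old_h)

print(rescale_point((400, 300), original_size=(600, 800)))  # -> (512.0, 384.0)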
| 28 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ):
"""simple docstring"""
UpperCamelCase = sigma_max
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCamelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.float32 , device=UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
UpperCamelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device )
UpperCamelCase = sigma + gamma * sigma
UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_hat + sigma_hat * model_output
UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_prev + sigma_prev * model_output
UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ):
"""simple docstring"""
raise NotImplementedError()
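# The schedule built in set_timesteps above geometrically interpolates the squared noise
# level from sigma_max**2 (exponent i = 0) down to sigma_min**2 (i = num_steps - 1). A
# standalone evaluation of the same formula with the config defaults:
sigma_min, sigma_max, num_steps = 0.02, 100.0, 5
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1))
    for i in range(num_steps)
]
print(schedule)  # [10000.0, ~141.4, 2.0, ~0.0283, 0.0004] - endpoints are sigma_max**2 and sigma_min**2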
| 28 | 1 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def A ( self : Optional[int] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : List[str] ):
"""simple docstring"""
if tokenize_kwargs is None:
UpperCamelCase = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
UpperCamelCase = truncation
UpperCamelCase = tokenize_kwargs
UpperCamelCase = {}
if return_tensors is not None:
UpperCamelCase = return_tensors
return preprocess_params, {}, postprocess_params
def A ( self : List[Any] , UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.framework
UpperCamelCase = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
return model_inputs
def A ( self : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str]=False ):
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Tuple , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : str ):
"""simple docstring"""
return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
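# Typical usage of the pipeline above. The model name is just a common default shown for
# illustration, not mandated by this file; with return_tensors left False the output is a
# nested Python list of shape (1, num_tokens, hidden_size) per input string.
from transformers import pipeline

extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
features = extractor("Transformers are neat.", tokenize_kwargs={"truncation": True})
print(len(features[0]), len(features[0][0]))  # num_tokens, hidden_size (768 for DistilBERT)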
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
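# The file above follows the transformers lazy-import convention: public names are
# declared in _import_structure and only materialized on first attribute access, which
# keeps `import transformers` cheap. A dependency-free sketch of the same idea using a
# module-level __getattr__ (PEP 562); the names here are illustrative.
import importlib

_lazy_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")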
| 28 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_lowerCamelCase : Union[str, Any] = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """albert"""
def __init__( self : int , UpperCamelCase__ : Optional[int]=3_0_0_0_0 , UpperCamelCase__ : Union[str, Any]=1_2_8 , UpperCamelCase__ : int=4_0_9_6 , UpperCamelCase__ : Dict=1_2 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : List[Any]=6_4 , UpperCamelCase__ : Any=1_6_3_8_4 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : str="gelu_new" , UpperCamelCase__ : str=0 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : Tuple=1E-1_2 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : int="absolute" , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : str=3 , **UpperCamelCase__ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase = vocab_size
UpperCamelCase = embedding_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_hidden_groups
UpperCamelCase = num_attention_heads
UpperCamelCase = inner_group_num
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = classifier_dropout_prob
UpperCamelCase = position_embedding_type
class SCREAMING_SNAKE_CASE ( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self : List[str] ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
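# Sketch: instantiating the config and inspecting the ONNX input axes from the
# `inputs` property above (run from user code; assumes an installed `transformers`):
#
#     from transformers import AlbertConfig
#     from transformers.models.albert.configuration_albert import AlbertOnnxConfig
#
#     config = AlbertConfig()
#     onnx_config = AlbertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes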
| 28 |
'''simple docstring'''
def solution( max_perimeter = 10**9 ) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
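    # The recurrence above yields the perimeters 16, 50, 196, 722, ... of
    # almost-equilateral triangles with integral area (an assumption based on
    # the shape of the recurrence; cf. Project Euler 94). Quick check:
    print(solution(1000))  # 984 = 16 + 50 + 196 + 722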
| 28 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : List[str] = {
"tanreinama/GPTSAN-2.8B-spout_is_uniform": (
"https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """gptsan-japanese"""
_SCREAMING_SNAKE_CASE = [
"""past_key_values""",
]
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : int , UpperCamelCase__ : Tuple=3_6_0_0_0 , UpperCamelCase__ : int=1_2_8_0 , UpperCamelCase__ : str=1_0_2_4 , UpperCamelCase__ : Dict=8_1_9_2 , UpperCamelCase__ : int=4_0_9_6 , UpperCamelCase__ : List[Any]=1_2_8 , UpperCamelCase__ : List[str]=1_0 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[str]=1_6 , UpperCamelCase__ : Union[str, Any]=1_6 , UpperCamelCase__ : Tuple=1_2_8 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[Any]="float32" , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[Any]=0.0_0_2 , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Tuple=3_5_9_9_8 , UpperCamelCase__ : Union[str, Any]=3_5_9_9_5 , UpperCamelCase__ : int=3_5_9_9_9 , **UpperCamelCase__ : int , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = d_ff
UpperCamelCase = d_ext
UpperCamelCase = d_spout
UpperCamelCase = num_switch_layers
UpperCamelCase = num_ext_layers
UpperCamelCase = num_switch_layers + num_ext_layers
UpperCamelCase = num_heads
UpperCamelCase = num_experts
UpperCamelCase = expert_capacity
UpperCamelCase = dropout_rate
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = router_bias
UpperCamelCase = router_jitter_noise
UpperCamelCase = router_dtype
UpperCamelCase = router_ignore_padding_tokens
UpperCamelCase = output_hidden_states
UpperCamelCase = output_attentions
UpperCamelCase = initializer_factor
UpperCamelCase = output_router_logits
UpperCamelCase = use_cache
super().__init__(
separator_token_id=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
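# Sketch: the attribute_map above aliases generic names onto the GPTSAN fields
# (run from user code; assumes an installed `transformers`):
#
#     from transformers import GPTSanJapaneseConfig
#
#     config = GPTSanJapaneseConfig(d_model=1024, num_switch_layers=10)
#     print(config.hidden_size)  # 1024, via the "hidden_size" -> "d_model" alias
#     print(config.num_layers)   # 10 = num_switch_layers + num_ext_layers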
| 28 |
'''simple docstring'''
import math
class Graph :
    """simple docstring"""
    def __init__( self , n=0 ): # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ] # dp[i][j] stores minimum distance from i to j
        for i in range(0 , n ):
            self.dp[i][i] = 0 # a node is at distance zero from itself
    def add_edge( self , u , v , w ):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall( self ):
        """simple docstring"""
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))  # expected: 11, via 1 -> 3 -> 4 (5 + 6)
    print(graph.show_min(0, 3))  # expected: 16, via 0 -> 2 -> 3 (9 + 7)
| 28 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 28 |
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
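# Minimal usage sketch of the public API re-exported above (run from user code):
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     print(accelerator.device, accelerator.distributed_type)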
| 28 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """distilbert"""
_SCREAMING_SNAKE_CASE = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[int]=3_0_5_2_2 , UpperCamelCase__ : List[Any]=5_1_2 , UpperCamelCase__ : Dict=False , UpperCamelCase__ : int=6 , UpperCamelCase__ : int=1_2 , UpperCamelCase__ : str=7_6_8 , UpperCamelCase__ : Optional[Any]=4 * 7_6_8 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Tuple=0.0_2 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Any=0.2 , UpperCamelCase__ : Tuple=0 , **UpperCamelCase__ : List[str] , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = sinusoidal_pos_embds
UpperCamelCase = n_layers
UpperCamelCase = n_heads
UpperCamelCase = dim
UpperCamelCase = hidden_dim
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation
UpperCamelCase = initializer_range
UpperCamelCase = qa_dropout
UpperCamelCase = seq_classif_dropout
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ )
class SCREAMING_SNAKE_CASE ( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self : int ):
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
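# Sketch: reading DistilBERT fields through the aliases in attribute_map above
# (run from user code; assumes an installed `transformers`):
#
#     from transformers import DistilBertConfig
#
#     config = DistilBertConfig(dim=768, n_layers=6, n_heads=12)
#     print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 768 6 12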
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : List[Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
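# Usage sketch for the tokenizer declared above (run from user code; the
# checkpoint is real but must be downloaded, so this is illustrative only):
#
#     from transformers import M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#     batch = tokenizer("Hello world", return_tensors="pt")
#     print(batch["input_ids"].shape)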
| 28 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowerCamelCase ( A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = XCLIPTextConfig()
# derive patch size from model name
UpperCamelCase = model_name.find('patch' )
UpperCamelCase = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
UpperCamelCase = XCLIPVisionConfig(patch_size=A__ , num_frames=A__ )
if "large" in model_name:
UpperCamelCase = 768
UpperCamelCase = 3_072
UpperCamelCase = 12
UpperCamelCase = 1_024
UpperCamelCase = 4_096
UpperCamelCase = 16
UpperCamelCase = 24
UpperCamelCase = 768
UpperCamelCase = 3_072
if model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = 336
UpperCamelCase = XCLIPConfig.from_text_vision_configs(A__ , A__ )
if "large" in model_name:
UpperCamelCase = 768
return config
def __lowerCamelCase ( A__ ) -> Tuple:
"""simple docstring"""
# text encoder
if name == "token_embedding.weight":
UpperCamelCase = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
UpperCamelCase = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
UpperCamelCase = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
UpperCamelCase = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
UpperCamelCase = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
UpperCamelCase = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
UpperCamelCase = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
UpperCamelCase = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
UpperCamelCase = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
UpperCamelCase = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
UpperCamelCase = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
UpperCamelCase = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
UpperCamelCase = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
UpperCamelCase = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
UpperCamelCase = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
UpperCamelCase = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
UpperCamelCase = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
UpperCamelCase = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
UpperCamelCase = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
UpperCamelCase = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
UpperCamelCase = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
UpperCamelCase = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(A__ )
if "attn.in_proj" in key:
UpperCamelCase = key.split('.' )
if key.startswith('visual' ):
UpperCamelCase = key_split[3]
UpperCamelCase = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[
:dim
]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
elif key.startswith('mit' ):
UpperCamelCase = key_split[2]
UpperCamelCase = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = key_split[2]
UpperCamelCase = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = rename_key(A__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCamelCase = val.T
UpperCamelCase = val
return orig_state_dict
def __lowerCamelCase ( A__ ) -> Optional[Any]:
"""simple docstring"""
if num_frames == 8:
UpperCamelCase = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
UpperCamelCase = 'eating_spaghetti.npy'
elif num_frames == 32:
UpperCamelCase = 'eating_spaghetti_32_frames.npy'
UpperCamelCase = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=A__ , repo_type='dataset' , )
UpperCamelCase = np.load(A__ )
return list(A__ )
def __lowerCamelCase ( A__ , A__=None , A__=False ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
UpperCamelCase = model_to_url[model_name]
UpperCamelCase = 8
if "16-frames" in model_name:
UpperCamelCase = 16
elif "shot" in model_name:
UpperCamelCase = 32
UpperCamelCase = get_xclip_config(A__ , A__ )
UpperCamelCase = XCLIPModel(A__ )
model.eval()
if "drive" in checkpoint_url:
UpperCamelCase = 'pytorch_model.bin'
gdown.cached_download(A__ , A__ , quiet=A__ )
UpperCamelCase = torch.load(A__ , map_location='cpu' )['model']
else:
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ )['model']
UpperCamelCase = convert_state_dict(A__ , A__ )
UpperCamelCase = XCLIPModel(A__ )
UpperCamelCase , UpperCamelCase = model.load_state_dict(A__ , strict=A__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCamelCase = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
UpperCamelCase = VideoMAEImageProcessor(size=A__ )
UpperCamelCase = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
UpperCamelCase = XCLIPProcessor(image_processor=A__ , tokenizer=A__ )
UpperCamelCase = prepare_video(A__ )
UpperCamelCase = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=A__ , return_tensors='pt' , padding=A__ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
UpperCamelCase = model(**A__ )
# Verify outputs
UpperCamelCase = outputs.logits_per_video
UpperCamelCase = logits_per_video.softmax(dim=1 )
print('Probs:' , A__ )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase = torch.tensor([[7.0_9_9_9e-0_4, 9.9_8_8_3e-0_1, 4.5_5_8_0e-0_4]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase = torch.tensor([[7.6_9_3_7e-0_4, 9.9_7_2_8e-0_1, 1.9_4_7_3e-0_3]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = torch.tensor([[3.3_8_7_7e-0_4, 9.9_9_3_7e-0_1, 2.8_8_8_8e-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase = torch.tensor([[3.8_5_5_4e-0_4, 9.9_9_2_9e-0_1, 3.2_7_5_4e-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase = torch.tensor([[7.1_8_9_0e-0_6, 9.9_9_9_4e-0_1, 5.6_5_5_9e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase = torch.tensor([[1.0_3_2_0e-0_5, 9.9_9_9_3e-0_1, 6.2_4_3_5e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase = torch.tensor([[4.1_3_7_7e-0_6, 9.9_9_9_0e-0_1, 9.8_3_8_6e-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase = torch.tensor([[4.1_3_4_7e-0_5, 9.9_9_6_2e-0_1, 3.3_4_1_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase = torch.tensor([[8.5_8_5_7e-0_5, 9.9_9_2_8e-0_1, 6.3_2_9_1e-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase = torch.tensor([[9.8_2_1_9e-0_4, 9.9_5_9_3e-0_1, 3.0_8_6_3e-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase = torch.tensor([[3.5_0_8_2e-0_4, 9.9_7_8_5e-0_1, 1.7_9_6_6e-0_3]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(A__ , A__ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(A__ , organization='nielsr' )
processor.push_to_hub(A__ , organization='nielsr' )
slow_tokenizer.push_to_hub(A__ , organization='nielsr' )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : str = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
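# Example invocation of the conversion script above (the file name is assumed):
#
#     python convert_x_clip_original_pytorch_to_hf.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path ./xclip-base-patch32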
| 28 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance( emb_a , emb_b , eps=1e-1_2 ):
    """simple docstring"""
    norm_emb_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(emb_a , axis=1 ) , a_min=eps ) ).T
    norm_emb_b = jnp.divide(emb_b.T , jnp.clip(jnp.linalg.norm(emb_b , axis=1 ) , a_min=eps ) ).T
    return jnp.matmul(norm_emb_a , norm_emb_b.T )
class FlaxStableDiffusionSafetyCheckerModule ( nn.Module ):
    """simple docstring"""
    config: CLIPConfig
    dtype: jnp.dtype = jnp.floataa
    def setup( self : List[Any] ):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
        self.special_care_embeds_weights = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
    def __call__( self : str , clip_input ):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.0_1
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class SCREAMING_SNAKE_CASE ( FlaxPreTrainedModel ):
"""simple docstring"""
    config_class = CLIPConfig
    main_input_name = """clip_input"""
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self : Union[str, Any] , config : CLIPConfig , input_shape : Optional[Tuple] = None , seed : int = 0 , dtype : jnp.dtype = jnp.floataa , _do_init : bool = True , **kwargs , ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 2_2_4, 2_2_4, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights( self : int , rng : jax.random.KeyArray , input_shape : Tuple , params : FrozenDict = None ):
        """simple docstring"""
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        random_params = self.module.init(rngs , clip_input )['params']
        return random_params
    def __call__( self : List[Any] , clip_input , params : dict = None , ):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {'params': params or self.params} , jnp.array(clip_input , dtype=jnp.floataa ) , rngs={} , )
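# Quick numerical check of the cosine-similarity helper defined above:
if __name__ == "__main__":
    a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
    b = jnp.array([[1.0, 0.0]])
    print(jax_cosine_distance(a, b))  # [[1.], [0.]]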
| 28 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
_lowerCamelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
def __init__( self : Tuple , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=True , UpperCamelCase__ : Dict="[UNK]" , UpperCamelCase__ : str="[SEP]" , UpperCamelCase__ : List[Any]="[PAD]" , UpperCamelCase__ : Union[str, Any]="[CLS]" , UpperCamelCase__ : Optional[Any]="[MASK]" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : int=None , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase__ ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(UpperCamelCase__ , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**UpperCamelCase__ )
UpperCamelCase = do_lower_case
    def build_inputs_with_special_tokens( self : str , token_ids_a : List[int] , token_ids_b : Optional[List[int]]=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Tuple , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self : Union[str, Any] , save_directory : str , filename_prefix : Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
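# Sketch: sentence-pair encoding exercising the special-token methods above
# (run from user code; real checkpoint, but a download is required):
#
#     from transformers import ElectraTokenizerFast
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     encoding = tokenizer("first sentence", "second sentence")
#     print(encoding["token_type_ids"])  # 0s for the first segment, 1s for the second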
| 28 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( ChineseCLIPImageProcessor ):
"""simple docstring"""
    def __init__( self : Dict , *args , **kwargs ):
        """simple docstring"""
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 28 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing( search_prob , find_max = True , max_x = math.inf , min_x = -math.inf , max_y = math.inf , min_y = -math.inf , visualization = False , start_temperate = 100 , rate_of_decrease = 0.01 , threshold_temp = 1 , ) -> Any:
    """simple docstring"""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score )
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0 , len(neighbors ) - 1 )  # picking a random neighbor
            picked_neighbor = neighbors.pop(index )
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt
        plt.plot(range(iterations ) , scores )
        plt.xlabel('Iterations' )
        plt.ylabel('Function values' )
        plt.show()
    return best_state
if __name__ == "__main__":
    def test_fa ( x , y ):
        """simple docstring"""
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
    )
    def test_fa ( x , y ) -> int:
        """simple docstring"""
        return (3 * x**2) - (6 * y)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f'''{local_min.score()}'''
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f'''{local_min.score()}'''
    )
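    # The acceptance rule above takes a worse neighbor with probability
    # p = e**(change / current_temp); a quick sanity check of the formula:
    change, temperature = -4.0, 100.0
    print(round(math.e ** (change / temperature), 3))  # 0.961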
| 28 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int:
"""simple docstring"""
def get_dataset(A__ ):
UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCamelCase = get_dataset(A__ )
UpperCamelCase = get_dataset(A__ )
UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int:
"""simple docstring"""
UpperCamelCase = []
for epoch in range(A__ ):
# Train quickly
model.train()
for batch in dataloader:
UpperCamelCase , UpperCamelCase = batch
UpperCamelCase = model(A__ )
UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ )
accelerator.backward(A__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ):
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Parameter(torch.randn(1 ) )
UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A ( self : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
return x * self.a + self.b
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
# Train baseline
UpperCamelCase = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' )
accelerator.save_state(UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.load_state(UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save everything
UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' )
accelerator.save_state(UpperCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase__ )
test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ )
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch.tensor([1, 2, 3] )
UpperCamelCase = torch.tensor([2, 3, 4] )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(net.parameters() )
UpperCamelCase = Accelerator()
with self.assertRaises(UpperCamelCase__ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def A ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
UpperCamelCase = scheduler.state_dict()
train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(UpperCamelCase__ , scheduler.state_dict() )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase = accelerator.prepare(UpperCamelCase__ )
            # Save 11 states; with total_limit=2 only the last two should remain:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing"
_lowerCamelCase : Union[str, Any] = DummyModel()
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase : Any = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_lowerCamelCase : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_lowerCamelCase : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 28 | 1 |
'''simple docstring'''
def __lowerCamelCase ( number ) -> bool:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = F"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
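    # The check above returns True exactly for automorphic numbers, i.e.
    # numbers whose square ends in the number itself (5 -> 25, 76 -> 5776):
    print([n for n in range(1, 100) if __lowerCamelCase(n)])  # [1, 5, 6, 25, 76]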
| 28 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read( dataset , length ):
    """simple docstring"""
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch( dataset , length , batch_size ):
    """simple docstring"""
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted( dataset , length , type ):
    """simple docstring"""
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch( dataset , length , batch_size , type ):
    """simple docstring"""
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """simple docstring"""
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
        print('first set of iterations' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling)' )
        for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs ) )
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(
                dataset , **kwargs )
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
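# `get_duration` is imported from a local `utils` module not shown here; a
# plausible sketch of such a decorator (an assumption, not the real helper):
#
#     import functools, time
#
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper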
| 28 | 1 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 28 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n    title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n    author = \"Lin, Chin-Yew\",\n    booktitle = \"Text Summarization Branches Out\",\n    month = jul,\n    year = \"2004\",\n    address = \"Barcelona, Spain\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/W04-1013\",\n    pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n        `\"rougeL\"`: Longest common subsequence based scoring.\n        `\"rougeLsum\"`: rougeLsum splits text using `\"\n\"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric('rouge')\n    >>> predictions = [\"hello there\", \"general kenobi\"]\n    >>> references = [\"hello there\", \"general kenobi\"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n    >>> print(results[\"rouge1\"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results[\"rouge1\"].mid.fmeasure)\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence'),
                    'references': datasets.Value('string', id='sequence'),
                }
            ),
            codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/ROUGE_(metric)',
                'https://github.com/google-research/google-research/tree/master/rouge',
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
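# Hedged sketch (an addition): with use_aggregator=False the metric returns, per
# rouge type, a plain list of per-example Score tuples instead of bootstrap
# aggregates.
#
#     rouge = datasets.load_metric('rouge')
#     out = rouge.compute(predictions=['hello there'], references=['hello there'],
#                         use_aggregator=False)
#     print(out['rouge1'][0].fmeasure)  # 1.0 for an exact match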
| 28 | 1 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Constructs an ERNIE-M tokenizer backed by SentencePiece."""

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding='utf8',
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = '', []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize('NFKC', ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        if self.sp_model_kwargs.get('enable_sampling') is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha') is not None:
            alpha = self.sp_model_kwargs.get('alpha')
        if self.sp_model_kwargs.get('nbest_size') is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size')
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, 'r', encoding='utf-8') as f:
            for index, line in enumerate(f):
                token = line.rstrip('\n')
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
            )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!'
                    )
                    index = token_index
                writer.write(token + '\n')
                index += 1
        tokenizer_model_file = os.path.join(save_directory, 'sentencepiece.bpe.model')
        with open(tokenizer_model_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
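# Hedged usage sketch (an addition): exercising the tokenizer above. The checkpoint
# id mirrors the PRETRAINED_VOCAB_FILES_MAP entries and may not resolve as-is.
#
#     tokenizer = ErnieMTokenizer.from_pretrained('susnato/ernie-m-base_pytorch')
#     encoded = tokenizer('Hello world')
#     # input_ids follow build_inputs_with_special_tokens: [CLS] X [SEP]
#     print(encoded['input_ids'])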
| 28 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL image by `level`."""

    def brightness(c: int) -> float:
        # 128 + level + (c - 128) simplifies to c + level; Image.point clips the
        # result to the valid range of 8-bit bands.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
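# Quick sanity-check sketch (an addition, not part of the original script):
def _demo_brightness() -> None:
    demo = Image.new('L', (1, 1), 100)
    print(change_brightness(demo, 100).getpixel((0, 0)))  # expected: 200
    print(change_brightness(demo, 200).getpixel((0, 0)))  # expected: 255 (clipped to 8-bit range)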
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_lowerCamelCase : List[str] = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
| 28 | 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.'
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.'
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f'The model device type is {model_device.type}. However, cuda is needed for quantization.'
            ' We move the model to cuda.'
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} '
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == 'balanced_low_0'),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n '
                    )
                else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit'
                    )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.'
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"""{module} has no attribute {split}.""")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
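# Hedged usage sketch (an addition): quantizing an empty-initialized model with the
# helpers above. Model name, checkpoint path and threshold are placeholders.
#
#     from transformers import AutoConfig, AutoModelForCausalLM  # hypothetical target
#
#     config = AutoConfig.from_pretrained('gpt2')
#     with init_empty_weights():
#         empty_model = AutoModelForCausalLM.from_config(config)
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     quantized = load_and_quantize_model(
#         empty_model, bnb_config, weights_location='path/to/weights', device_map='auto'
#     )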
| 28 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 28 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
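    # Example invocation (an illustration; the script's filename and the paths are
    # assumptions, and the checkpoint must first be downloaded from the link above):
    #
    #   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
    #       --cvt_model cvt-w24 \
    #       --image_size 384 \
    #       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
    #       --pytorch_dump_folder_path ./cvt-w24-384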
| 28 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act='gelu',
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='MRA does not output attentions')
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4')
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3')
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
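# Note (an addition): the @slow integration tests above only execute when slow tests
# are enabled via the RUN_SLOW environment variable, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/models/mra/test_modeling_mra.py
#
# (the test-file path is an assumption about where this module lives).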
| 28 | 1 |
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 28 |
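# A minimal end-to-end sketch of the API re-exported above: Accelerator.prepare
# wraps the model, optimizer and dataloader for the current device setup, and
# accelerator.backward replaces loss.backward(). All names below are placeholders.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = DataLoader(TensorDataset(torch.randn(24, 4), torch.randn(24, 2)), batch_size=8)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for inputs, targets in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)
    optimizer.step()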
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_lowerCamelCase : Union[str, Any] = "\\n\n"
_lowerCamelCase : List[str] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_lowerCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def A ( self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int = 1_6 , UpperCamelCase__ : bool = True , UpperCamelCase__ : List[Any]=None ):
"""simple docstring"""
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
UpperCamelCase = 'cuda'
else:
UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ )
UpperCamelCase = model.to(UpperCamelCase__ )
UpperCamelCase = AutoTokenizer.from_pretrained(UpperCamelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values() )
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
UpperCamelCase = model.config.max_length - 1
else:
UpperCamelCase = model.config.max_length
UpperCamelCase = tokenizer(
UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors='pt' , return_attention_mask=UpperCamelCase__ , ).to(UpperCamelCase__ )
UpperCamelCase = encodings['input_ids']
UpperCamelCase = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
UpperCamelCase = []
UpperCamelCase = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ ) ):
UpperCamelCase = min(start_index + batch_size , len(UpperCamelCase__ ) )
UpperCamelCase = encoded_texts[start_index:end_index]
UpperCamelCase = attn_masks[start_index:end_index]
if add_start_token:
UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCamelCase__ )
UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
UpperCamelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(UpperCamelCase__ ), attn_mask] , dim=1 )
UpperCamelCase = encoded_batch
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).logits
UpperCamelCase = out_logits[..., :-1, :].contiguous()
UpperCamelCase = labels[..., 1:].contiguous()
UpperCamelCase = attn_mask[..., 1:].contiguous()
                UpperCamelCase = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , UpperCamelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCamelCase__ )}
| 28 | 1 |
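# A small numerical check of the quantity the metric computes, assuming
# natural-log cross-entropy: perplexity is the exponential of the mean
# per-token negative log-likelihood.
import torch
from torch.nn import CrossEntropyLoss

logits = torch.randn(1, 5, 100)            # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (1, 5))     # (batch, seq_len)
nll = CrossEntropyLoss(reduction='none')(logits.transpose(1, 2), labels)
perplexity = torch.exp(nll.mean(1))
print(perplexity)  # near 100 for uniform guessing over 100 classes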
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
UpperCamelCase = str(bin(A__ ) )[2:] # remove the leading "0b"
UpperCamelCase = str(bin(A__ ) )[2:] # remove the leading "0b"
UpperCamelCase = max(len(A__ ) , len(A__ ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(A__ ) , b_binary.zfill(A__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
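# Cross-check against Python's built-in integer AND. The function above is
# defined under an obfuscated placeholder name; binary_and is assumed here.
for a, b in [(0, 0), (1, 1), (25, 32), (37, 50)]:
    assert int(binary_and(a, b), 2) == a & b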
'''simple docstring'''
def __lowerCamelCase ( A__ = 50 ) -> int:
"""simple docstring"""
UpperCamelCase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 28 | 1 |
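# Sanity checks against the worked example in Project Euler 114: a row of
# length 7 admits exactly 17 arrangements, and a row of length 3 admits 2
# (all black, or one red block). `solution` is the name the __main__ guard
# above already references.
assert solution(3) == 2
assert solution(7) == 17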
'''simple docstring'''
def __lowerCamelCase ( A__ ) -> list:
"""simple docstring"""
UpperCamelCase = len(A__ )
for i in range(1 , A__ ):
UpperCamelCase = collection[i]
UpperCamelCase = 0
UpperCamelCase = i - 1
while low <= high:
UpperCamelCase = (low + high) // 2
if val < collection[mid]:
UpperCamelCase = mid - 1
else:
UpperCamelCase = mid + 1
for j in range(A__ , A__ , -1 ):
UpperCamelCase = collection[j - 1]
UpperCamelCase = val
return collection
if __name__ == "__main__":
_lowerCamelCase : int = input("Enter numbers separated by a comma:\n").strip()
_lowerCamelCase : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28 | 1 |
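# Usage sketch for the sort above (binary_insertion_sort is the name the
# __main__ guard references). Binary search brings the comparison count to
# O(n log n), but the element shifting keeps the worst case at O(n^2).
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([2, 1, 1]) == [1, 1, 2]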
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def __lowerCamelCase ( A__ , A__=None ) -> Any:
"""simple docstring"""
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
UpperCamelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
UpperCamelCase = requests.get(A__ , headers=A__ ).json()
UpperCamelCase = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(A__ ):
UpperCamelCase = requests.get(url + F"""&page={i + 2}""" , headers=A__ ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __lowerCamelCase ( A__ , A__=None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
UpperCamelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
UpperCamelCase = requests.get(A__ , headers=A__ ).json()
UpperCamelCase = {}
try:
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
UpperCamelCase = math.ceil((result['total_count'] - 100) / 100 )
for i in range(A__ ):
UpperCamelCase = requests.get(url + F"""&page={i + 2}""" , headers=A__ ).json()
artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = None
if token is not None:
UpperCamelCase = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
UpperCamelCase = requests.get(A__ , headers=A__ , allow_redirects=A__ )
UpperCamelCase = result.headers['Location']
UpperCamelCase = requests.get(A__ , allow_redirects=A__ )
UpperCamelCase = os.path.join(A__ , F"""{artifact_name}.zip""" )
with open(A__ , 'wb' ) as fp:
fp.write(response.content )
def __lowerCamelCase ( A__ , A__=None ) -> str:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = None
with zipfile.ZipFile(A__ ) as z:
for filename in z.namelist():
if not os.path.isdir(A__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(A__ ) as f:
for line in f:
UpperCamelCase = line.decode('UTF-8' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCamelCase = line[: line.index(': ' )]
UpperCamelCase = line[line.index(': ' ) + len(': ' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('FAILED ' ):
# `test` is the test method that failed
UpperCamelCase = line[len('FAILED ' ) :]
failed_tests.append(A__ )
elif filename == "job_name.txt":
UpperCamelCase = line
if len(A__ ) != len(A__ ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(A__ )} for `errors` """
F"""and {len(A__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
' problem.' )
UpperCamelCase = None
if job_name and job_links:
UpperCamelCase = job_links.get(A__ , A__ )
# A list with elements of the form (line of error, error, failed test)
UpperCamelCase = [x + [y] + [job_link] for x, y in zip(A__ , A__ )]
return result
def __lowerCamelCase ( A__ , A__=None ) -> Tuple:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = [os.path.join(A__ , A__ ) for p in os.listdir(A__ ) if p.endswith('.zip' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(A__ , job_links=A__ ) )
return errors
def __lowerCamelCase ( A__ , A__=None ) -> Tuple:
"""simple docstring"""
UpperCamelCase = Counter()
counter.update([x[1] for x in logs] )
UpperCamelCase = counter.most_common()
UpperCamelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCamelCase = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    UpperCamelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=A__ ) )
return r
def __lowerCamelCase ( A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = test.split('::' )[0]
if test.startswith('tests/models/' ):
UpperCamelCase = test.split('/' )[2]
else:
UpperCamelCase = None
return test
def __lowerCamelCase ( A__ , A__=None ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCamelCase = [x for x in logs if x[2] is not None]
UpperCamelCase = {x[2] for x in logs}
UpperCamelCase = {}
for test in tests:
UpperCamelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCamelCase = counter.most_common()
UpperCamelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCamelCase = sum(error_counts.values() )
if n_errors > 0:
UpperCamelCase = {'count': n_errors, 'errors': error_counts}
    UpperCamelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=A__ ) )
return r
def __lowerCamelCase ( A__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase = '| no. | error | status |'
UpperCamelCase = '|-:|:-|:-|'
UpperCamelCase = [header, sep]
for error in reduced_by_error:
UpperCamelCase = reduced_by_error[error]['count']
UpperCamelCase = F"""| {count} | {error[:100]} | |"""
lines.append(A__ )
return "\n".join(A__ )
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
UpperCamelCase = '| model | no. of errors | major error | count |'
UpperCamelCase = '|-:|-:|-:|-:|'
UpperCamelCase = [header, sep]
for model in reduced_by_model:
UpperCamelCase = reduced_by_model[model]['count']
UpperCamelCase , UpperCamelCase = list(reduced_by_model[model]['errors'].items() )[0]
UpperCamelCase = F"""| {model} | {count} | {error[:60]} | {_count} |"""
lines.append(A__ )
return "\n".join(A__ )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowerCamelCase : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowerCamelCase : int = get_job_links(args.workflow_run_id, token=args.token)
_lowerCamelCase : int = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowerCamelCase : Optional[int] = k.find(" / ")
_lowerCamelCase : int = k[index + len(" / ") :]
_lowerCamelCase : int = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : str = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowerCamelCase : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowerCamelCase : Optional[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowerCamelCase : Optional[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowerCamelCase : Optional[int] = reduce_by_error(errors)
_lowerCamelCase : Tuple = reduce_by_model(errors)
    sa = make_github_table(reduced_by_error)
    sb = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sb)
| 28 |
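# A toy run of the two aggregators above (reduce_by_error / reduce_by_model are
# the names the __main__ block references) on hand-made log entries shaped like
# the (error_line, error, failed_test, job_link) tuples the artifact parser emits.
logs = [
    ('line1', 'AssertionError', 'tests/models/bert/test_modeling_bert.py::test_a', None),
    ('line2', 'AssertionError', 'tests/models/bert/test_modeling_bert.py::test_b', None),
    ('line3', 'ImportError', 'tests/models/gpt2/test_modeling_gpt2.py::test_c', None),
]
print(reduce_by_error(logs))   # AssertionError counted twice, ImportError once
print(reduce_by_model(logs))   # grouped under the 'bert' and 'gpt2' models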
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase = []
for i in range(A__ ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
    return torch.tensor(A__ , dtype=torch.float32 )
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ )
UpperCamelCase = 1.0 - self.betas
UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase = 1.0
# setable values
UpperCamelCase = None
UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() )
UpperCamelCase = variance_type
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.int64 )
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ):
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) )
UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase = variance.log()
UpperCamelCase = beta.log()
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
UpperCamelCase = self.alphas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = torch.clamp(
UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device )
UpperCamelCase = self._get_variance(
UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase = variance
elif self.variance_type == "learned_range":
UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
UpperCamelCase = variance * variance_noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ):
"""simple docstring"""
UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 28 | 1 |
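# The add_noise method at the end implements the closed-form forward process
# q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I). A quick
# shape check against the public diffusers export of this scheduler:
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000)
x0 = torch.randn(2, 3, 8, 8)
noise = torch.randn_like(x0)
timesteps = torch.tensor([0, 999])
xt = scheduler.add_noise(x0, noise, timesteps)
assert xt.shape == x0.shape  # at t=999 nearly all signal is replaced by noise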
'''simple docstring'''
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
UpperCamelCase = 0
for i in range(1 , 1_001 ):
total += i**i
return str(A__ )[-10:]
if __name__ == "__main__":
print(solution())
| 28 |
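# Only the last ten digits are needed, so the same answer follows from modular
# exponentiation without ever materialising the huge powers:
MOD = 10**10
print(str(sum(pow(i, i, MOD) for i in range(1, 1_001)) % MOD).zfill(10))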
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any=1_3 , UpperCamelCase__ : Optional[int]=3_2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : str=[1_0, 2_0, 3_0, 4_0] , UpperCamelCase__ : str=[2, 2, 3, 2] , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=3_7 , UpperCamelCase__ : Union[str, Any]="gelu" , UpperCamelCase__ : Dict=1_0 , UpperCamelCase__ : Union[str, Any]=0.0_2 , UpperCamelCase__ : int=["stage2", "stage3", "stage4"] , UpperCamelCase__ : List[str]=[2, 3, 4] , UpperCamelCase__ : Any=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = image_size
UpperCamelCase = num_channels
UpperCamelCase = num_stages
UpperCamelCase = hidden_sizes
UpperCamelCase = depths
UpperCamelCase = is_training
UpperCamelCase = use_labels
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_labels
UpperCamelCase = initializer_range
UpperCamelCase = out_features
UpperCamelCase = out_indices
UpperCamelCase = scope
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase = None
UpperCamelCase = ConvNextBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.prepare_config_and_inputs()
UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs
UpperCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( _a , _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : List[str] ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[int] ):
"""simple docstring"""
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def A ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def A ( self : List[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def A ( self : Optional[int] ):
"""simple docstring"""
pass
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(UpperCamelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
def A ( self : Optional[Any] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
UpperCamelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(UpperCamelCase )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
UpperCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A ( self : Optional[Any] ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(UpperCamelCase__ )
UpperCamelCase = self.default_image_processor
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase = model(**UpperCamelCase__ )
# verify the logits
UpperCamelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor([-0.0_2_6_0, -0.4_7_3_9, 0.1_9_1_1] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (ConvNextBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = ConvNextConfig
_SCREAMING_SNAKE_CASE = False
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = ConvNextModelTester(self )
| 28 | 1 |
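# A short sketch of the backbone variant exercised above, extracting multi-scale
# feature maps from a published checkpoint (the checkpoint name is illustrative):
import torch
from transformers import ConvNextBackbone

backbone = ConvNextBackbone.from_pretrained(
    'facebook/convnext-tiny-224', out_features=['stage2', 'stage3', 'stage4']
)
with torch.no_grad():
    outputs = backbone(torch.randn(1, 3, 224, 224))
for name, fmap in zip(backbone.out_features, outputs.feature_maps):
    print(name, tuple(fmap.shape))  # channels double as the spatial size halves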
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_lowerCamelCase : Optional[int] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_lowerCamelCase : Union[str, Any] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_lowerCamelCase : Optional[Any] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_lowerCamelCase : List[Any] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_lowerCamelCase : List[str] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) )
UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(A__ )
UpperCamelCase = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=A__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) )
UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' )
with open(A__ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ )
UpperCamelCase = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376
| 28 |
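# The index expression inside generate_random_hand encodes a three-way
# comparison: (play >= oppo) + (play > oppo) is 0 for a loss, 1 for a tie and
# 2 for a win, which maps straight onto ['Loss', 'Tie', 'Win'].
for play, oppo, expected in [(0, 5, 'Loss'), (4, 4, 'Tie'), (7, 2, 'Win')]:
    assert ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)] == expected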
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
_lowerCamelCase : int = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
UpperCamelCase = value
else:
UpperCamelCase = value
return new_state_dict
def __lowerCamelCase ( A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = ''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCamelCase = state_dict.pop(
F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCamelCase = in_proj_weight_cross_attn[:256, :]
UpperCamelCase = in_proj_bias_cross_attn[:256]
UpperCamelCase = in_proj_weight_cross_attn[256:512, :]
UpperCamelCase = in_proj_bias_cross_attn[256:512]
UpperCamelCase = in_proj_weight_cross_attn[-256:, :]
UpperCamelCase = in_proj_bias_cross_attn[-256:]
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = image.size
UpperCamelCase = max(A__ , A__ )
UpperCamelCase = 800 if 'detection' in checkpoint_url else 1_000
UpperCamelCase = target_max_size / current_max_size
UpperCamelCase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __lowerCamelCase ( A__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = F.to_tensor(A__ )
UpperCamelCase = F.normalize(A__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ ) -> Optional[Any]:
"""simple docstring"""
logger.info('Converting model...' )
# load original state dict
UpperCamelCase = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' )
# rename keys
for src, dest in rename_keys:
rename_key(A__ , A__ , A__ )
UpperCamelCase = rename_backbone_keys(A__ )
# query, key and value matrices need special treatment
read_in_q_k_v(A__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = 'model.'
for key in state_dict.copy().keys():
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
UpperCamelCase = state_dict.pop(A__ )
UpperCamelCase = val
# create HuggingFace model and load state dict
UpperCamelCase = TableTransformerConfig(
backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
UpperCamelCase = 15
UpperCamelCase = 2
UpperCamelCase = {0: 'table', 1: 'table rotated'}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
else:
UpperCamelCase = 125
UpperCamelCase = 6
UpperCamelCase = {
0: 'table',
1: 'table column',
2: 'table row',
3: 'table column header',
4: 'table projected row header',
5: 'table spanning cell',
}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
UpperCamelCase = DetrImageProcessor(
format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 )
UpperCamelCase = TableTransformerForObjectDetection(A__ )
model.load_state_dict(A__ )
model.eval()
# verify our conversion
UpperCamelCase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
UpperCamelCase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=A__ )
UpperCamelCase = Image.open(A__ ).convert('RGB' )
UpperCamelCase = normalize(resize(A__ , A__ ) ).unsqueeze(0 )
UpperCamelCase = model(A__ )
if "detection" in checkpoint_url:
UpperCamelCase = (1, 15, 3)
UpperCamelCase = torch.tensor(
[[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]] )
UpperCamelCase = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]] )
else:
UpperCamelCase = (1, 125, 7)
UpperCamelCase = torch.tensor(
[[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]] )
UpperCamelCase = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , A__ , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , A__ , atol=1e-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if push_to_hub:
# Push model to HF hub
logger.info('Pushing model to the hub...' )
UpperCamelCase = (
'microsoft/table-transformer-detection'
if 'detection' in checkpoint_url
else 'microsoft/table-transformer-structure-recognition'
)
model.push_to_hub(A__ )
image_processor.push_to_hub(A__ )
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCamelCase : int = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 28 | 1 |
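# Once converted, the checkpoint behaves like any HF object-detection model.
# A minimal inference sketch against the published hub checkpoint (the input
# image path is a placeholder):
import torch
from PIL import Image
from transformers import AutoImageProcessor, TableTransformerForObjectDetection

processor = AutoImageProcessor.from_pretrained('microsoft/table-transformer-detection')
model = TableTransformerForObjectDetection.from_pretrained('microsoft/table-transformer-detection')
image = Image.open('page.png').convert('RGB')
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
results = processor.post_process_object_detection(
    outputs, threshold=0.9, target_sizes=torch.tensor([image.size[::-1]])
)[0]
print(results['labels'], results['scores'])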
'''simple docstring'''
import numpy as np
def __lowerCamelCase ( A__ ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
def __lowerCamelCase ( A__ ) -> np.array:
"""simple docstring"""
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
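# Hedged numeric check for the two activations above (a plain sigmoid and the
# quick-GELU variant x * sigmoid(1.702 * x)); the math is restated inline
# because the defs in this dump are anonymized.
_x = np.array([-1.0, 0.0, 1.0])
print(1 / (1 + np.exp(-_x)))                 # ~[0.2689, 0.5, 0.7311]
print(_x * (1 / (1 + np.exp(-1.702 * _x))))  # ~[-0.1542, 0.0, 0.8458]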
| 28 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCamelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(_a )
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Any , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , 'decord' )
self.check_model_type(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase = {}
if frame_sampling_rate is not None:
UpperCamelCase = frame_sampling_rate
if num_frames is not None:
UpperCamelCase = num_frames
UpperCamelCase = {}
if top_k is not None:
UpperCamelCase = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[str] , UpperCamelCase__ : Union[str, List[str]] , **UpperCamelCase__ : Dict ):
"""simple docstring"""
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Tuple=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
UpperCamelCase = BytesIO(requests.get(UpperCamelCase__ ).content )
UpperCamelCase = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
UpperCamelCase = 0
UpperCamelCase = num_frames * frame_sampling_rate - 1
UpperCamelCase = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa )
UpperCamelCase = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def A ( self : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model(**UpperCamelCase__ )
return model_outputs
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase , UpperCamelCase = probs.topk(UpperCamelCase__ )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
UpperCamelCase = scores.tolist()
UpperCamelCase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
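# Hedged usage sketch for the video-classification pipeline above (requires
# the decord backend). The model id is an illustrative assumption; the clip
# URL also appears in the library's tests.
from transformers import pipeline

video_classifier = pipeline(
    "video-classification", model="MCG-NJU/videomae-base-finetuned-kinetics"
)
print(video_classifier(
    "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
    top_k=2,
))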
| 28 | 1 |
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
if not isinstance(A__ , A__ ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
UpperCamelCase = precision
UpperCamelCase = ceil(precision / 14 )
UpperCamelCase = 426_880 * Decimal(10_005 ).sqrt()
UpperCamelCase = 1
UpperCamelCase = 13_591_409
UpperCamelCase = Decimal(A__ )
for k in range(1 , A__ ):
UpperCamelCase = factorial(6 * k ) // (factorial(3 * k ) * factorial(A__ ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
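# Quick sanity check (hedged, using the name `pi` from the __main__ block):
# with precision=10 the loop adds no correction terms (ceil(10 / 14) == 1),
# and the [:-1] slice drops the final, possibly mis-rounded digit.
print(pi(10))  # expected to start with "3.14159265"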
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
_lowerCamelCase : Optional[int] = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
_lowerCamelCase : Union[str, Any] = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
_lowerCamelCase : Dict = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
_lowerCamelCase : Optional[Any] = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
_lowerCamelCase : List[Any] = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
_lowerCamelCase : List[str] = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = randrange(len(A__ ) ), randrange(len(A__ ) )
UpperCamelCase = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def __lowerCamelCase ( A__ = 100 ) -> Optional[Any]:
"""simple docstring"""
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def __lowerCamelCase ( A__ , A__ ) -> str:
"""simple docstring"""
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , A__ )
def __lowerCamelCase ( A__ , A__ , A__ ) -> Tuple:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[str]:
"""simple docstring"""
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
UpperCamelCase = [PokerHand(A__ ) for hand in SORTED_HANDS]
UpperCamelCase = poker_hands.copy()
shuffle(A__ )
UpperCamelCase = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
# Test that five high straights are compared correctly.
UpperCamelCase = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=A__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
UpperCamelCase = PokerHand('2C 4S AS 3D 5C' )
UpperCamelCase = True
UpperCamelCase = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
UpperCamelCase = 0
UpperCamelCase = os.path.abspath(os.path.dirname(A__ ) )
UpperCamelCase = os.path.join(A__ , 'poker_hands.txt' )
with open(A__ ) as file_hand:
for line in file_hand:
UpperCamelCase = line[:14].strip()
UpperCamelCase = line[15:].strip()
UpperCamelCase , UpperCamelCase = PokerHand(A__ ), PokerHand(A__ )
UpperCamelCase = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376
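# Hedged standalone example of the comparison API exercised above, taken from
# the first row of the compare table: a plain straight flush loses to a royal
# flush.
print(PokerHand("2H 3H 4H 5H 6H").compare_with(PokerHand("KS AS TS QS JS")))  # "Loss"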
| 28 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """mra"""
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any]=5_0_2_6_5 , UpperCamelCase__ : Optional[Any]=7_6_8 , UpperCamelCase__ : Dict=1_2 , UpperCamelCase__ : Optional[int]=1_2 , UpperCamelCase__ : int=3_0_7_2 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : str=5_1_2 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : Optional[Any]=0.0_2 , UpperCamelCase__ : int=1E-5 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=4 , UpperCamelCase__ : Optional[Any]="full" , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = block_per_row
UpperCamelCase = approx_mode
UpperCamelCase = initial_prior_first_n_blocks
UpperCamelCase = initial_prior_diagonal_n_blocks
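# Hedged instantiation sketch for the configuration above; the public class
# name MraConfig is an assumption based on the model_type = "mra" convention.
from transformers import MraConfig

config = MraConfig(hidden_size=768, num_hidden_layers=12, block_per_row=4)
print(config.model_type)  # "mra"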
| 28 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 2
@register_to_config
def __init__( self : Union[str, Any] , UpperCamelCase__ : float = 0.0_2 , UpperCamelCase__ : float = 1_0_0 , UpperCamelCase__ : float = 1.0_0_7 , UpperCamelCase__ : float = 8_0 , UpperCamelCase__ : float = 0.0_5 , UpperCamelCase__ : float = 5_0 , ):
"""simple docstring"""
UpperCamelCase = sigma_max
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
UpperCamelCase = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
UpperCamelCase = torch.tensor(UpperCamelCase__ , dtype=torch.floataa , device=UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : Optional[torch.Generator] = None ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
UpperCamelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
UpperCamelCase = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCamelCase = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCamelCase__ ).to(sample.device )
UpperCamelCase = sigma + gamma * sigma
UpperCamelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def A ( self : str , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_hat + sigma_hat * model_output
UpperCamelCase = (sample_hat - pred_original_sample) / sigma_hat
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : List[Any] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = sample_prev + sigma_prev * model_output
UpperCamelCase = (sample_prev - pred_original_sample) / sigma_prev
UpperCamelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCamelCase__ , derivative=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ):
"""simple docstring"""
raise NotImplementedError()
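# Hedged sampling-loop sketch mirroring how the scheduler above is driven by
# the diffusers KarrasVePipeline. Method names (set_timesteps,
# add_noise_to_input, step) follow the public KarrasVeScheduler that this
# anonymized class corresponds to; `unet` is a trivial stand-in denoiser, and
# the pipeline's model-output scaling is omitted for brevity.
import torch
from diffusers import KarrasVeScheduler

def unet(x, sigma):
    # stand-in for a real denoising model: predicts zeros
    return torch.zeros_like(x)

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 64, 64) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = unet(sample_hat, sigma_hat)
    sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample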
| 28 | 1 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = VideoClassificationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ , top_k=2 )
UpperCamelCase = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
"""simple docstring"""
for example in examples:
UpperCamelCase = video_classifier(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{'score': ANY(UpperCamelCase__ ), 'label': ANY(UpperCamelCase__ )},
{'score': ANY(UpperCamelCase__ ), 'label': ANY(UpperCamelCase__ )},
] , )
@require_torch
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
UpperCamelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} )
UpperCamelCase = pipeline(
'video-classification' , model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , frame_sampling_rate=4 )
UpperCamelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
UpperCamelCase = video_classifier(UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}] , )
UpperCamelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
] , )
@require_tf
def A ( self : List[Any] ):
"""simple docstring"""
pass
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Dict = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
_lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
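# What the lazy registration above buys (hedged illustration): the heavy
# torch-backed submodule is only imported when one of its attributes is first
# accessed, keeping `import transformers` itself cheap.
import transformers

mlm_cls = transformers.IBertForMaskedLM  # the deferred import happens here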
| 28 | 1 |
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
if not nums:
return 0
UpperCamelCase = nums[0]
UpperCamelCase = 0
for num in nums[1:]:
UpperCamelCase , UpperCamelCase = (
max_excluding + num,
max(A__ , A__ ),
)
return max(A__ , A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
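# Worked example for the routine above (the classic maximum-non-adjacent-sum,
# a.k.a. "house robber", problem; the public name below is assumed from the
# upstream source). For [2, 7, 9, 3, 1] the best picks are 2 + 9 + 1 = 12:
# the (including, excluding) pair evolves (2,0) -> (7,2) -> (11,7) -> (10,11)
# -> (12,11), and max(12, 11) == 12.
print(maximum_non_adjacent_sum([2, 7, 9, 3, 1]))  # 12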
| 28 |
'''simple docstring'''
def __lowerCamelCase ( A__ = 10**9 ) -> int:
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 0
UpperCamelCase = 0
UpperCamelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
UpperCamelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
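# Context note (hedged): the recurrence above steps through a Pell-style
# family of almost-equilateral integer-sided triangles and accumulates their
# perimeters up to max_perimeter, which matches Project Euler problem 94.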
| 28 | 1 |
'''simple docstring'''
from timeit import timeit
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
UpperCamelCase = 0
while number:
number &= number - 1
result += 1
return result
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
if number < 0:
raise ValueError('the value of input must not be negative' )
UpperCamelCase = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
def do_benchmark(A__ ) -> None:
UpperCamelCase = 'import __main__ as z'
print(F"""Benchmark when {number = }:""" )
print(F"""{get_set_bits_count_using_modulo_operator(A__ ) = }""" )
UpperCamelCase = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=A__ )
print(F"""timeit() runs in {timing} seconds""" )
print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(A__ ) = }""" )
UpperCamelCase = timeit(
'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' , setup=A__ , )
print(F"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(A__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
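# Standalone check (function names taken from the benchmark setup strings
# above): 25 == 0b11001, so both strategies must report 3 set bits.
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == 3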
| 28 |
'''simple docstring'''
import math
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=0 ): # a graph with Node 0,1,...,N-1
"""simple docstring"""
UpperCamelCase = n
UpperCamelCase = [
[math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ )
] # adjacency matrix for weight
UpperCamelCase = [
[math.inf for j in range(0 , UpperCamelCase__ )] for i in range(0 , UpperCamelCase__ )
] # dp[i][j] stores minimum distance from i to j
def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = w
def A ( self : str ):
"""simple docstring"""
for k in range(0 , self.n ):
for i in range(0 , self.n ):
for j in range(0 , self.n ):
UpperCamelCase = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
def A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
_lowerCamelCase : List[str] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
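# Hand-checked expectations for the sample graph above: the shortest 1 -> 4
# path is 1 -> 3 -> 4 (5 + 6 = 11), and the shortest 0 -> 3 path is
# 0 -> 2 -> 3 (9 + 7 = 16).
print(graph.show_min(1, 4))  # 11
print(graph.show_min(0, 3))  # 16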
| 28 | 1 |
'''simple docstring'''
from torch import nn
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
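# Hedged usage of the factory above: "swish" / "silu" both map to nn.SiLU,
# i.e. x * sigmoid(x).
import torch
from torch import nn

act = nn.SiLU()
print(act(torch.tensor([-1.0, 0.0, 1.0])))  # tensor([-0.2689, 0.0000, 0.7311])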
| 28 |
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 28 | 1 |
'''simple docstring'''
from timeit import timeit
_lowerCamelCase : List[str] = {
"MALAYALAM": True,
"String": False,
"rotor": True,
"level": True,
"A": True,
"BB": True,
"ABC": False,
"amanaplanacanalpanama": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
UpperCamelCase = 0
UpperCamelCase = len(A__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
UpperCamelCase = len(A__ ) // 2
UpperCamelCase = len(A__ )
    # We only need to traverse the first half of the string, since a
    # palindrome requires the i-th character to equal the (n - i - 1)-th.
    # e.g. for [0,1,2,3,4,5], index 4 pairs with index 1 (i == n - i - 1),
    # where n is the length of the string.
return all(s[i] == s[n - i - 1] for i in range(A__ ) )
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
if len(A__ ) <= 2:
return True
if s[0] == s[len(A__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def __lowerCamelCase ( A__ ) -> bool:
"""simple docstring"""
return s == s[::-1]
def __lowerCamelCase ( A__ ) -> None:
"""simple docstring"""
UpperCamelCase = F"""all({name}(key) is value for key, value in test_data.items())"""
UpperCamelCase = F"""from __main__ import test_data, {name}"""
UpperCamelCase = 500_000
UpperCamelCase = timeit(stmt=A__ , setup=A__ , number=A__ )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("a man a plan a canal panama")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("is_palindrome_slice")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("is_palindrome")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("is_palindrome_recursive")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("is_palindrome_traversal")
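# Quick agreement check across the four strategies benchmarked above (the
# names come from the benchmark strings; in this anonymized dump the defs
# themselves are unnamed, so the upstream names are assumed):
assert all(
    fn("amanaplanacanalpanama")
    for fn in (is_palindrome, is_palindrome_traversal, is_palindrome_recursive, is_palindrome_slice)
)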
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase : List[Any] = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
_lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
_lowerCamelCase : Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Any:
"""simple docstring"""
for attribute in key.split('.' ):
UpperCamelCase = getattr(A__ , A__ )
if weight_type is not None:
UpperCamelCase = getattr(A__ , A__ ).shape
else:
UpperCamelCase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase = value
elif weight_type == "weight_g":
UpperCamelCase = value
elif weight_type == "weight_v":
UpperCamelCase = value
elif weight_type == "bias":
UpperCamelCase = value
else:
UpperCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCamelCase ( A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
UpperCamelCase = None
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
elif name.split('.' )[0] == "proj":
UpperCamelCase = fairseq_model.proj
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(A__ )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , A__ )
if "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
UpperCamelCase = 'weight'
else:
UpperCamelCase = None
set_recursively(A__ , A__ , A__ , A__ , A__ )
continue
if not is_used:
unused_weights.append(A__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> str:
"""simple docstring"""
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A__ )
def __lowerCamelCase ( A__ ) -> int:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(A__ , A__ , bias=A__ )
UpperCamelCase = emb.weight.data
return lin_layer
def __lowerCamelCase ( A__ ) -> Optional[int]:
"""simple docstring"""
with open(A__ , 'r' , encoding='utf-8' ) as f:
UpperCamelCase = f.readlines()
UpperCamelCase = [line.split(' ' )[0] for line in lines]
UpperCamelCase = len(A__ )
UpperCamelCase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(A__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = WavaVecaConfig.from_pretrained(A__ )
UpperCamelCase = SpeechaTextaConfig.from_pretrained(
A__ , vocab_size=A__ , decoder_layers=A__ , do_stable_layer_norm=A__ )
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCamelCase = model[0].eval()
# set weights for wav2vec2 encoder
UpperCamelCase = WavaVecaModel(A__ )
UpperCamelCase = recursively_load_weights_wavaveca(model.encoder , A__ )
UpperCamelCase = SpeechaTextaForCausalLM(A__ )
UpperCamelCase , UpperCamelCase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A__ )
# set output linear layer
unexpected_keys.remove('embed_out' )
UpperCamelCase = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized as an identity transform, so leaving it as-is is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCamelCase = SpeechEncoderDecoderModel(encoder=A__ , decoder=A__ )
UpperCamelCase = False
# add projection layer
UpperCamelCase = nn.Parameter(projection_layer.weight )
UpperCamelCase = nn.Parameter(projection_layer.bias )
UpperCamelCase = create_vocab_dict(A__ )
with open(os.path.join(A__ , 'vocab.json' ) , 'w' ) as fp:
json.dump(A__ , A__ )
UpperCamelCase = SpeechaTextaTokenizer(os.path.join(A__ , 'vocab.json' ) )
tokenizer.save_pretrained(A__ )
UpperCamelCase = hf_wavavec.config.to_dict()
UpperCamelCase = tokenizer.pad_token_id
UpperCamelCase = tokenizer.bos_token_id
UpperCamelCase = tokenizer.eos_token_id
UpperCamelCase = 'speech_to_text_2'
UpperCamelCase = 'wav2vec2'
UpperCamelCase = SpeechEncoderDecoderConfig.from_dict(A__ )
hf_wavavec.save_pretrained(A__ )
feature_extractor.save_pretrained(A__ )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=1_0224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
_lowerCamelCase : List[Any] = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
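# Hedged command-line sketch for the script above; the script filename and
# both input paths are placeholders:
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-speech2text2 \
#       --vocab_size 10224 \
#       --num_decoder_layers 7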
| 28 |
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict:
"""simple docstring"""
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
return jnp.matmul(A__ , norm_emb_a.T )
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = jnp.floataa
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype )
UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
UpperCamelCase = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
def __call__( self : str , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.vision_model(UpperCamelCase__ )[1]
UpperCamelCase = self.visual_projection(UpperCamelCase__ )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase = 0.0
UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ )
# Use a lower threshold if an image has any special care concept
UpperCamelCase = is_special_care * 0.0_1
UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CLIPConfig
_SCREAMING_SNAKE_CASE = """clip_input"""
_SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
"""simple docstring"""
if input_shape is None:
UpperCamelCase = (1, 2_2_4, 2_2_4, 3)
UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ )
super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init )
def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ):
"""simple docstring"""
UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ )
UpperCamelCase = {'params': params_rng, 'dropout': dropout_rng}
UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params']
return random_params
def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ):
"""simple docstring"""
UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
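# Small demo (hedged) of the row-normalized cosine-similarity helper defined
# at the top of this snippet and called `jax_cosine_distance` at its call
# sites: after normalization, matching directions score ~1.0.
import jax.numpy as jnp

a = jnp.array([[1.0, 0.0], [0.0, 2.0]])
print(jax_cosine_distance(a, a))  # ~[[1., 0.], [0., 1.]]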
| 28 | 1 |
'''simple docstring'''
from PIL import Image
def __lowerCamelCase ( img , level ) -> Image:
    """simple docstring"""
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change brightness to 100
_lowerCamelCase : List[str] = change_brightness(img, 100)
brigt_img.save("image_data/lena_brightness.png", format="png")
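# Hedged in-memory usage of change_brightness (name from the __main__ block):
# brightness(c) reduces to c + level, so a +50 shift maps 100 -> 150.
_img = Image.new("RGB", (4, 4), color=(100, 100, 100))
print(change_brightness(_img, 50).getpixel((0, 0)))  # (150, 150, 150)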
| 28 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
_lowerCamelCase : str = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
"""simple docstring"""
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 28 | 1 |
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def __lowerCamelCase ( A__ ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def __lowerCamelCase ( A__ , A__ , A__ ) -> np.ndarray:
"""simple docstring"""
UpperCamelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(A__ , A__ )
# Predict target for test data
UpperCamelCase = xgb.predict(A__ )
UpperCamelCase = predictions.reshape(len(A__ ) , 1 )
return predictions
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
UpperCamelCase = fetch_california_housing()
UpperCamelCase , UpperCamelCase = data_handling(A__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = train_test_split(
A__ , A__ , test_size=0.25 , random_state=1 )
UpperCamelCase = xgboost(A__ , A__ , A__ )
# Error printing
print(F"""Mean Absolute Error : {mean_absolute_error(A__ , A__ )}""" )
print(F"""Mean Square Error : {mean_squared_error(A__ , A__ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 28 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
def __lowerCamelCase ( A__=2 , A__=3 , A__=16 , A__ = 10 , A__ = 2 ) -> int:
"""simple docstring"""
def get_dataset(A__ ):
UpperCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(A__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCamelCase = get_dataset(A__ )
UpperCamelCase = get_dataset(A__ )
UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
UpperCamelCase = DataLoader(A__ , shuffle=A__ , batch_size=A__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ , A__=None ) -> int:
"""simple docstring"""
UpperCamelCase = []
for epoch in range(A__ ):
# Train quickly
model.train()
for batch in dataloader:
UpperCamelCase , UpperCamelCase = batch
UpperCamelCase = model(A__ )
UpperCamelCase = torch.nn.functional.mse_loss(A__ , A__ )
accelerator.backward(A__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ):
"""simple docstring"""
super().__init__()
UpperCamelCase = nn.Parameter(torch.randn(1 ) )
UpperCamelCase = nn.Parameter(torch.randn(1 ) )
def A ( self : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
return x * self.a + self.b
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase__ , automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A ( self : Optional[int] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
# Train baseline
UpperCamelCase = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
UpperCamelCase = os.path.join(UpperCamelCase__ , 'initial' )
accelerator.save_state(UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = Accelerator()
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.load_state(UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save everything
UpperCamelCase = os.path.join(UpperCamelCase__ , 'checkpoint' )
accelerator.save_state(UpperCamelCase__ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase__ )
test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
UpperCamelCase = train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase__ )
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = train(2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
((UpperCamelCase) , (UpperCamelCase)) = model.a.item(), model.b.item()
UpperCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = torch.tensor([1, 2, 3] )
UpperCamelCase = torch.tensor([2, 3, 4] )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(net.parameters() )
UpperCamelCase = Accelerator()
with self.assertRaises(UpperCamelCase__ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def A ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
UpperCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase__ , step_size=1 , gamma=0.9_9 )
UpperCamelCase , UpperCamelCase = dummy_dataloaders()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save initial
accelerator.save_state()
UpperCamelCase = scheduler.state_dict()
train(3 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertNotEqual(UpperCamelCase__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(UpperCamelCase__ , scheduler.state_dict() )
def A ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCamelCase = DummyModel()
UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase__ , total_limit=2 )
# Train baseline
UpperCamelCase = Accelerator(project_dir=UpperCamelCase__ , project_config=UpperCamelCase__ )
UpperCamelCase = accelerator.prepare(UpperCamelCase__ )
            # Save 11 states; with total_limit=2 only the last two survive:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase__ , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = ['torchrun', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase__ , env=os.environ.copy() )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = "/tmp/accelerate/state_checkpointing"
_lowerCamelCase : Union[str, Any] = DummyModel()
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
_lowerCamelCase : List[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
_lowerCamelCase ,_lowerCamelCase : Tuple = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
_lowerCamelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase : Union[str, Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
_lowerCamelCase ,_lowerCamelCase : Tuple = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
_lowerCamelCase : Any = group["params"][0].device
break
assert param_device.type == accelerator.device.type
_lowerCamelCase : Tuple = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
_lowerCamelCase : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
_lowerCamelCase : Dict = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 28 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
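# Usage sketch (requires the optional `sentencepiece` dependency; the checkpoint name is
# shown for illustration): thanks to the _LazyModule above, the heavy tokenizer module is
# only imported on first attribute access.
#
#   from transformers import MLukeTokenizer
#   tokenizer = MLukeTokenizer.from_pretrained("studio-ousia/mluke-base")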
| 28 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
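# Note on `formatted_as` (used by the read_formatted* benchmarks above): it temporarily
# switches the dataset's output format, so indexing returns e.g. numpy arrays instead of
# plain Python objects inside the block:
#
#   with dataset.formatted_as(type="numpy"):
#       batch = dataset[0:10]  # dict of column name -> numpy array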
| 28 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # a new tokenizer cannot be trained via the `tokenizers` lib, so these common tests are no-ops here
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 28 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_DESCRIPTION = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_KWARGS_DESCRIPTION = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLsum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
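# Minimal usage sketch, mirroring the docstring example above:
#
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(predictions=["hello there"], references=["hello there"])
#   print(results["rouge1"].mid.fmeasure)  # 1.0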
| 28 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
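# Usage sketch: any keyword from __init__ above can be overridden, the rest keep
# their defaults (the default layout is illustrative, not a released checkpoint):
#
#   config = WhisperConfig(encoder_layers=4, decoder_layers=4)
#   print(config.d_model, config.num_hidden_layers)  # 256 4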
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 28 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
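# Note: 128 + level + (c - 128) simplifies to c + level, i.e. a uniform shift of every
# channel value. For 8-bit images, Image.point evaluates the function once per possible
# value (0-255) to build a lookup table, which is then applied to every band.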
| 28 | 1 |
'''simple docstring'''
def solution(max_n: int = 1_000_000) -> int:
    """Returns the starting number below `max_n` that produces the longest Collatz
    chain (Project Euler problem 14). Chain lengths are memoized in `counters`."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, max_n):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter

    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
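# Worked example: the chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# so counters[13] ends up as 10 (nine halving/tripling steps plus counters[1] == 1).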
| 28 |
'''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 28 | 1 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI, i.e. runs triggered by the
    `schedule` event on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # (sic: the upstream helper spells its keyword argument `worflow_run_id`)
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the contents of the requested artifacts of the last completed daily CI run,
    as a dict mapping artifact name -> {filename: file content}."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
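# Usage sketch (the token env var and artifact name below are placeholders):
#
#   reports = get_last_daily_ci_reports(
#       artifact_names=["ci_results"], output_dir="/tmp/daily_ci", token=os.environ.get("GITHUB_TOKEN")
#   )
#   # -> {"ci_results": {"some_file.txt": "<file content>", ...}} for artifacts that exist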
| 28 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=8 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[Any]=9_9 , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : List[str]=5 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Optional[int]=3_6 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Dict=1_6 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : str=3 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Union[str, Any]=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = scope
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Optional[int] ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = 3_0_0
return config
def A ( self : Tuple ):
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def A ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = MraModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
UpperCamelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = True
UpperCamelCase = MraModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = MraForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = MraForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Dict ):
"""simple docstring"""
UpperCamelCase = self.num_choices
UpperCamelCase = MraForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : int ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = MraModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : str ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = MraModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='MRA does not output attentions' )
def A ( self : List[str] ):
"""simple docstring"""
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = torch.Size((1, 2_5_6, 7_6_8) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
UpperCamelCase = torch.arange(2_5_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 2_5_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
UpperCamelCase = torch.arange(4_0_9_6 ).unsqueeze(0 )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ )[0]
UpperCamelCase = 5_0_2_6_5
UpperCamelCase = torch.Size((1, 4_0_9_6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
UpperCamelCase = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 28 | 1 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit tensor along dim 1."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
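# Numerically, `entropy` above computes the Shannon entropy (in nats) of softmax(x):
# with Z = sum_i exp(x_i) and p_i = exp(x_i) / Z,
# H(p) = -sum_i p_i * log(p_i) = log(Z) - (sum_i x_i * exp(x_i)) / Z.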
class DeeBertEncoder(nn.Module):
def __init__( self : Any , UpperCamelCase__ : Any ):
"""simple docstring"""
super().__init__()
UpperCamelCase = config.output_attentions
UpperCamelCase = config.output_hidden_states
UpperCamelCase = nn.ModuleList([BertLayer(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = nn.ModuleList([BertHighway(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def A ( self : Dict , UpperCamelCase__ : str ):
"""simple docstring"""
if (type(UpperCamelCase__ ) is float) or (type(UpperCamelCase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
UpperCamelCase = x
else:
UpperCamelCase = x
def A ( self : Union[str, Any] , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[Any]=None , ):
"""simple docstring"""
UpperCamelCase = ()
UpperCamelCase = ()
UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = layer_module(
UpperCamelCase__ , UpperCamelCase__ , head_mask[i] , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = layer_outputs[0]
if self.output_attentions:
UpperCamelCase = all_attentions + (layer_outputs[1],)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = current_outputs + (all_attentions,)
UpperCamelCase = self.highway[i](UpperCamelCase__ )
# logits, pooled_output
if not self.training:
UpperCamelCase = highway_exit[0]
UpperCamelCase = entropy(UpperCamelCase__ )
UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(UpperCamelCase__ , i + 1 )
else:
UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = outputs + (all_attentions,)
UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
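# Control flow note: during inference, each BertHighway head computes logits from the
# intermediate hidden states of its layer; when the prediction entropy drops below that
# layer's early-exit threshold, a HighwayException carries the logits out of the forward
# pass so the remaining layers are skipped entirely.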
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , _a , )
class DeeBertModel(BertPreTrainedModel):
def __init__( self : Any , UpperCamelCase__ : Any ):
"""simple docstring"""
super().__init__(UpperCamelCase__ )
UpperCamelCase = config
UpperCamelCase = BertEmbeddings(UpperCamelCase__ )
UpperCamelCase = DeeBertEncoder(UpperCamelCase__ )
UpperCamelCase = BertPooler(UpperCamelCase__ )
self.init_weights()
def A ( self : int ):
"""simple docstring"""
self.encoder.init_highway_pooler(self.pooler )
def A ( self : List[Any] ):
"""simple docstring"""
return self.embeddings.word_embeddings
def A ( self : List[str] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = value
def A ( self : Any , UpperCamelCase__ : List[str] ):
"""simple docstring"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(UpperCamelCase__ )
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]=None , ):
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
if encoder_attention_mask is None:
UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
if token_type_ids is None:
UpperCamelCase = torch.zeros(UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase = encoder_attention_mask[:, None, None, :]
UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase = self.get_head_mask(UpperCamelCase__ , self.config.num_hidden_layers )
UpperCamelCase = self.embeddings(
input_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ )
UpperCamelCase = self.encoder(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(UpperCamelCase__ )
UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """A module to provide a shortcut from the output of one non-final BertLayer to
    cross-entropy computation in BertForSequenceClassification."""
def __init__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
UpperCamelCase = BertPooler(UpperCamelCase__ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def A ( self : Dict , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(UpperCamelCase__ )
# "return" pooler_output
# BertModel
UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
UpperCamelCase = bmodel_output[1]
UpperCamelCase = self.dropout(UpperCamelCase__ )
UpperCamelCase = self.classifier(UpperCamelCase__ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , _a , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
super().__init__(UpperCamelCase__ )
UpperCamelCase = config.num_labels
UpperCamelCase = config.num_hidden_layers
UpperCamelCase = DeeBertModel(UpperCamelCase__ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase__ )
def A ( self : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=-1 , UpperCamelCase__ : Union[str, Any]=False , ):
"""simple docstring"""
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.bert(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(UpperCamelCase__ )
UpperCamelCase = self.classifier(UpperCamelCase__ )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(UpperCamelCase__ )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(UpperCamelCase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(UpperCamelCase__ )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 28 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = "\\n\n"
_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n        NOTE: Perplexity can only be calculated for causal language models.\n        This includes models such as gpt2, causal variations of bert,\n        causal versions of t5, and more (the full list can be found\n        in the AutoModelForCausalLM documentation here:\n        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, gpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 28 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase ):
    def test_offload_state_dict( self ):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , 'index.json' )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"""{key}.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , 'weight' , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , 'weight.dat' )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {'weight': {'shape': [2, 3], 'dtype': str(dtype ).split('.' )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index['weight'] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if 'linear2' not in k}
        disk_part = {k: v for k, v in state_dict.items() if 'linear2' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if 'weight' in k}
        disk_part = {k: v for k, v in state_dict.items() if 'weight' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        state_dict = {'a.1': 0, 'a.10': 1, 'a.2': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1': 0, 'a.2': 2} )
        state_dict = {'a.1.a': 0, 'a.10.a': 1, 'a.2.a': 2}
        extracted = extract_submodules_state_dict(state_dict , ['a.1', 'a.2'] )
        self.assertDictEqual(extracted , {'a.1.a': 0, 'a.2.a': 2} )
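
# A minimal sketch of the offload utilities exercised above, runnable on its
# own (the temporary folder stands in for a real offload directory).
if __name__ == "__main__":
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        # writes one .dat file per tensor plus an index.json describing shapes/dtypes
        offload_state_dict(tmp_dir , model.state_dict() )
        # lazily maps keys back to tensors, loading from disk on access
        weight_map = OffloadedWeightsLoader(save_folder=tmp_dir )
        print(sorted(weight_map ) )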
| 28 |
'''simple docstring'''
def solution( length = 50 ) -> int:
    """Count the ways a row of ``length`` units can be filled with blocks of
    length at least three, separated by at least one empty square."""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
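
# Sanity check from the problem's worked example: a row measuring seven units
# can be filled in exactly seventeen ways.
assert solution(7) == 17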
| 28 | 1 |
'''simple docstring'''
def count_divisors( n ) -> int:
    """Count the divisors of ``n`` from its prime factorisation: if
    n = p1^a1 * ... * pk^ak, the divisor count is (a1 + 1) * ... * (ak + 1)."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def solution() -> int:
    """Return the first triangle number to have more than five hundred divisors."""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
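
# Worked example: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors
# (1, 2, 4, 7, 14 and 28).
assert count_divisors(28) == 6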
| 28 |
'''simple docstring'''
def binary_insertion_sort( collection ) -> list:
    """Sort ``collection`` in place with insertion sort, using binary search to
    find each insertion point, e.g. binary_insertion_sort([4, 1, 3]) == [1, 3, 4]."""
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift everything in [low, i) one slot to the right, then insert val
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 28 | 1 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e ).split(' ' )[:-1] )
        full_error_msg = ''
        depreciated_args = eval(str(e ).split(' ' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
benchmark.run()
if __name__ == "__main__":
main()
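
# Typical invocation (the flag names here are illustrative; see
# TensorFlowBenchmarkArguments for the full, authoritative set of options):
#   python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128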
| 28 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput ):
    """Output of the scheduler's `step`: the denoised previous sample and,
    optionally, the predicted fully-denoised sample."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase = []
for i in range(A__ ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
    """A DDPM-style scheduler as used in the UnCLIP pipelines."""

    @register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        # UnCLIP does not rescale model inputs; kept for scheduler API compatibility
        return sample

    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance , min=1e-20 ) )
            variance = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prev_timestep: Optional[int] = None , generator=None , return_dict: bool = True , ):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
                ' for the UnCLIPScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=generator , device=model_output.device )
            variance = self._get_variance(
                t , predicted_variance=predicted_variance , prev_timestep=prev_timestep , )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
                    ' for the UnCLIPScheduler.' )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.IntTensor , ):
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        timesteps = timesteps.to(original_samples.device )
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1 )
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
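
# Rough usage sketch for the scheduler above; the "model" is a stand-in that
# returns random noise of the right shape, so the output is meaningless -- the
# point is only the set_timesteps/step loop.
if __name__ == "__main__":
    scheduler = UnCLIPScheduler()
    scheduler.set_timesteps(25 )
    sample = torch.randn(1 , 3 , 64 , 64 )
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample )  # stand-in for a denoising model
        sample = scheduler.step(model_output , t , sample ).prev_sample
    print(sample.shape )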
| 28 | 1 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
    def setUp( self ):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
        tokenizer.save_pretrained(self.tmpdirname )

    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BloomTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def test_encode_decode( self ):
        tokenizer = self.get_rust_tokenizer()
        input_text = ['The quick brown fox</s>', 'jumps over the lazy dog</s>']
        expected_tokens = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(input_text )['input_ids']
        self.assertListEqual(computed_tokens , expected_tokens )
        decoded_tokens = tokenizer.batch_decode(computed_tokens )
        self.assertListEqual(decoded_tokens , input_text )
    def test_padding( self , max_length=6 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                try:
                    tokenizer_r.encode(s , max_length=max_length )
                    tokenizer_r.encode_plus(s , max_length=max_length )
                    tokenizer_r.batch_encode_plus(s2 , max_length=max_length )
                    tokenizer_r.encode(p , max_length=max_length )
                    tokenizer_r.batch_encode_plus(p2 , max_length=max_length )
                except ValueError:
                    self.fail('Bloom Tokenizer should be able to deal with padding' )
                tokenizer_r.pad_token = None # Hotfixing padding = None
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding='max_length' )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding='max_length' , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding='max_length' )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding='max_length' , )
    def test_encodings_from_xnli_dataset( self ):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset('xnli' , 'all_languages' , split='test' , streaming=True )
        sample_data = next(iter(ds ) )['premise'] # pick up one data
        input_text = list(sample_data.values() )
        output_tokens = list(map(tokenizer.encode , input_text ) )
        predicted_text = [tokenizer.decode(x , clean_up_tokenization_spaces=False ) for x in output_tokens]
        self.assertListEqual(predicted_text , input_text )
    def test_pretrained_model_lists( self ):
        # The tokenizer should register at least one checkpoint in its pretrained maps.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
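
# Minimal sketch of the encode/decode round trip the tests above exercise
# (downloads the real bigscience/tokenizer files, so network access is needed):
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained('bigscience/tokenizer' )
    ids = tok.encode('The quick brown fox' )
    print(ids )
    print(tok.decode(ids ) )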
| 28 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    @unittest.skip(reason='ConvNext does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='ConvNext does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        pass

    @unittest.skip(reason='ConvNext does not use feedforward chunking' )
    def test_feed_forward_chunking( self ):
        pass

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_backbone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ):
        model = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@require_torch
class ConvNextBackboneTest(unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
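
# Rough usage sketch for the backbone under test (the checkpoint is the real
# facebook/convnext-tiny-224 repo, so network access is required):
if __name__ == "__main__":
    backbone = ConvNextBackbone.from_pretrained(
        'facebook/convnext-tiny-224' , out_features=['stage2', 'stage3', 'stage4'] )
    pixel_values = torch.randn(1 , 3 , 224 , 224 )
    feature_maps = backbone(pixel_values ).feature_maps
    print([tuple(fm.shape ) for fm in feature_maps] )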
| 28 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig ):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self , backbone_config=None , num_queries=900 , max_position_embeddings=2048 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , num_feature_levels=5 , encoder_n_points=4 , decoder_n_points=4 , two_stage=True , two_stage_num_proposals=300 , with_box_refine=True , assign_first_stage=True , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , **kwargs , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
UpperCamelCase = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
else:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = backbone_config.pop('model_type' )
UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase = config_class.from_dict(UpperCamelCase__ )
UpperCamelCase = backbone_config
UpperCamelCase = num_queries
UpperCamelCase = max_position_embeddings
UpperCamelCase = d_model
UpperCamelCase = encoder_ffn_dim
UpperCamelCase = encoder_layers
UpperCamelCase = encoder_attention_heads
UpperCamelCase = decoder_ffn_dim
UpperCamelCase = decoder_layers
UpperCamelCase = decoder_attention_heads
UpperCamelCase = dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = activation_function
UpperCamelCase = init_std
UpperCamelCase = init_xavier_std
UpperCamelCase = encoder_layerdrop
UpperCamelCase = auxiliary_loss
UpperCamelCase = position_embedding_type
# deformable attributes
UpperCamelCase = num_feature_levels
UpperCamelCase = encoder_n_points
UpperCamelCase = decoder_n_points
UpperCamelCase = two_stage
UpperCamelCase = two_stage_num_proposals
UpperCamelCase = with_box_refine
UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
UpperCamelCase = class_cost
UpperCamelCase = bbox_cost
UpperCamelCase = giou_cost
# Loss coefficients
UpperCamelCase = mask_loss_coefficient
UpperCamelCase = dice_loss_coefficient
UpperCamelCase = bbox_loss_coefficient
UpperCamelCase = giou_loss_coefficient
UpperCamelCase = eos_coefficient
UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size( self ) -> int:
        return self.d_model

    def to_dict( self ):
        """Serializes this instance to a Python dictionary, inlining the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
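
# Minimal sketch of round-tripping the config above through a plain dict (the
# nested backbone config is serialized by the custom to_dict):
if __name__ == "__main__":
    config = DetaConfig()
    config_dict = config.to_dict()
    print(config_dict['model_type'] )                      # 'deta'
    print(config_dict['backbone_config']['model_type'] )  # 'resnet'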
| 28 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key( state_dict , old , new ) -> None:
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ) -> OrderedDict:
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict ) -> None:
    prefix = ''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""decoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""decoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""decoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""decoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""decoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""decoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
        in_proj_bias_cross_attn = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.weight"""] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.q_proj.bias"""] = in_proj_bias_cross_attn[:256]
        state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.weight"""] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.k_proj.bias"""] = in_proj_bias_cross_attn[256:512]
        state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.weight"""] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"""decoder.layers.{i}.encoder_attn.v_proj.bias"""] = in_proj_bias_cross_attn[-256:]
def resize( image , checkpoint_url ):
    width, height = image.size
    current_max_size = max(width , height )
    target_max_size = 800 if 'detection' in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
    return resized_image
def normalize( image ):
    image = F.to_tensor(image )
    image = F.normalize(image , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub ) -> None:
    logger.info('Converting model...' )
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    state_dict = rename_backbone_keys(state_dict )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
            val = state_dict.pop(key )
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1_000 )
    model = TableTransformerForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=filename )
    image = Image.open(file_path ).convert('RGB' )
    pixel_values = normalize(resize(image , checkpoint_url ) ).unsqueeze(0 )
    outputs = model(pixel_values )
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...' )
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name )
        image_processor.push_to_hub(model_name )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
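
# Example invocation (the script filename here is an assumption; the flags come
# from the argparse section above):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection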
| 28 | 1 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder( input_1: int = 1 , input_2: int = 1 , carry_in: int = 1 ) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1 , str )
        or isinstance(input_2 , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('inputs must be integers.' )
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.' )
    if (
        (math.floor(input_1 ) != input_1)
        or (math.floor(input_2 ) != input_2)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.' )
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.' )
    # build registers
    qr = qiskit.QuantumRegister(4 , 'qr' )
    cr = qiskit.ClassicalRegister(2 , 'cr' )
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i ) # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i ) # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i ) # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr ) # measure the last two qbits
    backend = qiskit.Aer.get_backend('aer_simulator' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_000 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
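
# For 1 + 1 with carry-in 1 the circuit is deterministic (only X gates, no
# Hadamard entries), so all 1000 shots should report the state '11'
# (carry bit 1, sum bit 1 -- i.e. binary 11 = decimal 3).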
| 28 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class VideoClassificationPipeline(Pipeline ):
    """Video classification pipeline using any model that maps to the video classification head."""

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , videos: Union[str, List[str]] , **kwargs ):
        return super().__call__(videos , **kwargs )
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://' ) or video.startswith('https://' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
        return model_inputs

    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores, ids = probs.topk(top_k )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
| 28 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset , length ):
    for i in range(length ):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
        (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    functions_shuffled = [
        (read, {'length': SMALL_TEST}),
        (read, {'length': SPEED_TEST_N_EXAMPLES}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
        (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
        (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
        (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('generating dataset' )
        features = datasets.Features(
            {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , 'dataset.arrow' ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={'list': (100,)} , )
        print('first set of iterations' )
        for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
        print('shuffling dataset' )
        dataset = dataset.shuffle()
        print('Second set of iterations (after shuffling' )
        for func, kwargs in functions_shuffled:
            print('shuffled ' , func.__name__ , str(kwargs ) )
            times['shuffled ' + func.__name__ + ' ' + ' '.join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
    with open(RESULTS_FILE_PATH , 'wb' ) as f:
        f.write(json.dumps(times ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize('hand, expected', TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize('hand, expected', TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values', TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    """simple docstring"""
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected', TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected', TEST_TYPES)
def test_hand_values(hand, expected):
    """simple docstring"""
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize('hand, other, expected', TEST_COMPARE)
def test_my_hand(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize('hand, other, expected', generate_random_hands())
def test_my_hand_randomly(hand, other, expected):
    """simple docstring"""
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand('2D AC 3H 4H 5S'), PokerHand('2S 3H 4H 5S 6C')]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand('2C 4S AS 3D 5C')
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, 'poker_hands.txt')
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
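# A minimal interactive sketch of the same PokerHand API the tests above exercise,
# using hand strings taken from TEST_COMPARE:
#
#   hand, other = PokerHand('2H 3H 4H 5H 6H'), PokerHand('KS AS TS QS JS')
#   hand.compare_with(other)  # -> 'Loss': the king-high straight flush wins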
'''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    """simple docstring"""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
_SCREAMING_SNAKE_CASE = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
_SCREAMING_SNAKE_CASE = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
_SCREAMING_SNAKE_CASE = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Use FP16 to accelerate inference."""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Benchmark training of model"""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Verbose memory tracing"""} )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Trace memory line by line"""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Save result to a CSV file"""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Save all print statements in a log file"""} )
_SCREAMING_SNAKE_CASE = field(default=_a , metadata={"""help""": """Whether to print environment information"""} )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''inference_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''inference_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''train_time_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''train_memory_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''env_info_{round(time() )}.csv''' , metadata={"""help""": """CSV filename used if saving environment information."""} , )
_SCREAMING_SNAKE_CASE = field(
default=f'''log_{round(time() )}.csv''' , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
_SCREAMING_SNAKE_CASE = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
_SCREAMING_SNAKE_CASE = field(
default=_a , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def A ( self : int ):
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
' are deprecated in general and it is advised to use external Benchmarking libraries '
' to benchmark Transformer models.' , UpperCamelCase__ , )
def A ( self : int ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def A ( self : List[str] ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'Please make sure you provide at least one model name / model identifier, *e.g.* `--models'
' bert-base-cased` or `args.models = [\'bert-base-cased\'].' )
return self.models
@property
def A ( self : Optional[int] ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('Multiprocessing is currently not possible on TPU.' )
return False
else:
return True
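# Hypothetical usage (the dataclass above is `BenchmarkArguments` upstream and its
# JSON helper is `to_json_string`; both names are obscured in this dump, and
# concrete runs usually go through the `PyTorchBenchmarkArguments` subclass):
#
#   args = PyTorchBenchmarkArguments(models=['bert-base-uncased'], batch_sizes=[8],
#                                    sequence_lengths=[128])
#   print(args.to_json_string())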
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""
    order = 2
    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        """simple docstring"""
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """simple docstring"""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        """simple docstring"""
        raise NotImplementedError()
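# A sampling-loop sketch for the stochastic Karras scheduler above, following the
# pattern of the upstream KarrasVe pipeline; `model` and the sample shape are
# hypothetical placeholders, and the second-order `step_correct` refinement
# (applied upstream when sigma_prev != 0) is omitted for brevity:
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = model(sample_hat, sigma_hat).sample  # hypothetical denoiser
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample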
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : Tuple = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"IBertForMaskedLM",
"IBertForMultipleChoice",
"IBertForQuestionAnswering",
"IBertForSequenceClassification",
"IBertForTokenClassification",
"IBertModel",
"IBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
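# With the lazy structure above, `import transformers` stays cheap: the heavy
# torch-backed module is only imported on first attribute access, e.g.:
#
#   from transformers import IBertConfig   # config only, no modeling import yet
#   from transformers import IBertModel    # triggers the torch-backed import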
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    """simple docstring"""
    @slow
    def test_bert_from_pretrained(self):
        """simple docstring"""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        """simple docstring"""
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)
                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        """simple docstring"""
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)
            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        """simple docstring"""
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer('Do you support jax jitted function?', return_tensors=TensorType.JAX)
            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)
            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'):
            _ = FlaxAutoModel.from_pretrained('bert-base')
    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')
    def test_model_file_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack', ):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model')
    def test_model_from_pt_suggestion(self):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError, 'Use `from_pt=True` to load this model'):
            _ = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only')
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """simple docstring"""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f'''{solution() = }''')
'''simple docstring'''
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
'''simple docstring'''
import math
class Graph:
    """simple docstring"""
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        """simple docstring"""
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # a node is at distance 0 from itself
    def add_edge(self, u, v, w):
        """simple docstring"""
        self.dp[u][v] = w
    def floyd_warshall(self):
        """simple docstring"""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])
    def show_min(self, u, v):
        """simple docstring"""
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
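# Floyd-Warshall runs in O(n^3) time and O(n^2) space. For the 5-node graph above,
# show_min(1, 4) resolves to 11 (path 1 -> 3 -> 4) and show_min(0, 3) to 16
# (path 0 -> 2 -> 3).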
'''simple docstring'''
def get_data(source_data: list) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def procentual_proximity(source_data: list, weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
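# Usage sketch: each weight picks a direction per column (0 = lower is better,
# 1 = higher is better) and every row gains a trailing combined score:
#
#   vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   procentual_proximity(vehicles, [0, 0, 1])
#   # -> [[20, 60, 2012, 2.0], [23, 90, 2015, 1.0], [22, 50, 2011, 1.3333...]]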
'''simple docstring'''
_lowerCamelCase : int = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """simple docstring"""
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """simple docstring"""
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            """simple docstring"""
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                'cached_{}_{}_{}_{}'.format(
                    'dev' if evaluate else 'train',
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '.lock'
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"""Loading features from cached file {cached_features_file}""")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"""Creating features from dataset file at {data_dir}""")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info('Training examples: %s', len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info('Saving features into cached file %s', cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            """simple docstring"""
            return len(self.features)
        def __getitem__(self, i):
            """simple docstring"""
            return self.features[i]
        def get_labels(self):
            """simple docstring"""
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """simple docstring"""
        features: List[InputFeatures]
        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            """simple docstring"""
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list
            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc='convert examples to features'):
                    if ex_index % 10_000 == 0:
                        logger.info('Writing example %d of %d' % (ex_index, len(examples)))
                    yield (
                        {
                            'example_id': 0,
                            'input_ids': ex.input_ids,
                            'attention_mask': ex.attention_mask,
                            'token_type_ids': ex.token_type_ids,
                        },
                        ex.label,
                    )
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        'example_id': tf.int32,
                        'input_ids': tf.int32,
                        'attention_mask': tf.int32,
                        'token_type_ids': tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        'example_id': tf.TensorShape([]),
                        'input_ids': tf.TensorShape([None, None]),
                        'attention_mask': tf.TensorShape([None, None]),
                        'token_type_ids': tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            """simple docstring"""
            return self.dataset
        def __len__(self):
            """simple docstring"""
            return len(self.features)
        def __getitem__(self, i):
            """simple docstring"""
            return self.features[i]
        def get_labels(self):
            """simple docstring"""
            return self.label_list
class HansProcessor(DataProcessor):
    """simple docstring"""
    def get_train_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_train_set.txt')), 'train')
    def get_dev_examples(self, data_dir):
        """simple docstring"""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'heuristics_evaluation_set.txt')), 'dev')
    def get_labels(self):
        """simple docstring"""
        return ["contradiction", "entailment", "neutral"]
    def _create_examples(self, lines, set_type):
        """simple docstring"""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = '%s-%s' % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith('ex') else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ex_index % 10_000 == 0:
            logger.info('Writing example %d' % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding='max_length',
            truncation=True,
            return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F"""guid: {example}""" )
logger.info(F"""features: {features[i]}""" )
return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
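# Hypothetical usage of the torch dataset above (the data directory is a
# placeholder; the HANS files come from the heuristics evaluation release):
#
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
#   train = HansDataset(data_dir='hans/', tokenizer=tokenizer, task='hans',
#                       max_seq_length=128)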
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
_lowerCamelCase : Tuple = "\nHuman: <<task>>\n\nAssistant: "
_lowerCamelCase : List[Any] = "huggingface-tools/default-prompts"
_lowerCamelCase : Union[str, Any] = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def __lowerCamelCase ( A__ , A__ , A__="run" ) -> List[Any]:
"""simple docstring"""
if prompt_or_repo_id is None:
UpperCamelCase = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , A__ ) is not None:
return prompt_or_repo_id
UpperCamelCase = cached_file(
A__ , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(A__ , 'r' , encoding='utf-8' ) as f:
return f.read()
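# Behavior sketch for download_prompt: anything containing whitespace is treated
# as a literal prompt and returned unchanged, otherwise it is resolved as a
# dataset repo id (the agent name below is a placeholder):
#
#   download_prompt(None, 'my-agent')               # fetch the default 'run' template
#   download_prompt('Do <<task>> now', 'my-agent')  # contains spaces -> returned as-is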
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """simple docstring"""
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """simple docstring"""
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        """simple docstring"""
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param('concept_embeds', jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            'special_care_embeds', jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param('concept_embeds_weights', jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param('special_care_embeds_weights', jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """simple docstring"""
    config_class = CLIPConfig
    main_input_name = 'clip_input'
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        """simple docstring"""
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None):
        """simple docstring"""
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {'params': params_rng, 'dropout': dropout_rng}
        random_params = self.module.init(rngs, clip_input)['params']
        return random_params
    def __call__(self, clip_input, params: dict = None):
        """simple docstring"""
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {'params': params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
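# Hypothetical check (the repo id is an assumption, and Flax weights may require
# `from_pt=True`; `pixel_values` stands for a batch of CLIP-preprocessed images
# in NCHW layout):
#
#   checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#       'CompVis/stable-diffusion-safety-checker', from_pt=True)
#   has_nsfw = checker(pixel_values, params=checker.params)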