from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
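# A minimal, self-contained sketch of the lazy-import pattern `_LazyModule`
# implements above (hypothetical names; the real implementation lives in
# `transformers.utils`): attribute access triggers the actual submodule import,
# so importing the package stays cheap even when torch-only modules are registered.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        # cache the resolved attribute so subsequent lookups skip __getattr__
        setattr(self, attr, value)
        return value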
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off
    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))
    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict (the k bias is always zero in the original model)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
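# Example invocation (hedged: the checkpoint downloads run to several GB and
# require the LAVIS fork referenced at the top of this script):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b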
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Writes the pyarrow table as SQL to a database.

        Caller is responsible for opening and closing the SQL connection.
        """
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
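# A hedged usage sketch: the reader/writer above back the public
# `Dataset.from_sql` / `Dataset.to_sql` helpers, which is how they are normally
# reached rather than being instantiated directly (names illustrative; an
# in-memory sqlite connection is assumed):
#
#     import sqlite3
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#     con = sqlite3.connect(":memory:")
#     ds.to_sql("my_table", con)                                # SqlDatasetWriter
#     loaded = Dataset.from_sql("SELECT * FROM my_table", con)  # SqlDatasetReader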
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
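# A hedged usage sketch (downloads tokenizer.json from the Hub on first use):
#
#     from transformers import GPTNeoXTokenizerFast
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world").input_ids
#     assert tokenizer.decode(ids) == "Hello world"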
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.

Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\frac{1}{2}")

Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing each prediction/reference pair."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
def solution() -> int:
    """
    Returns the product a * b * c of the unique Pythagorean triplet
    (a < b < c) for which a + b + c = 1000.
    """
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
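
# Quick cross-check of the brute force above: the unique Pythagorean triplet
# with a + b + c = 1000 is (200, 375, 425) -- 200^2 + 375^2 = 425^2 -- so the
# product is 200 * 375 * 425 = 31875000.
assert (200 + 375 + 425, 200**2 + 375**2) == (1_000, 425**2)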
if __name__ == "__main__":
    print(f"{solution() = }")
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
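# A hedged usage sketch (names are illustrative): `deprecate` pops a deprecated
# keyword argument out of `kwargs`, warns once, and returns the value so the
# caller can keep honoring it until the removal version.
#
#     def scale(x, **kwargs):
#         factor = deprecate("factor", "1.0.0", "Pass `x` pre-scaled instead.", take_from=kwargs)
#         return x * (factor if factor is not None else 1)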
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms

from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling


def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name


def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted list that is >= item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted list that is > item."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
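
# A few quick checks of the helpers above (pure-Python mirrors of the stdlib
# `bisect` API); each assertion is easy to verify by hand on the small list.
assert bisect_left([0, 5, 7, 10, 15], 6) == 2
assert bisect_right([0, 5, 7, 10, 15], 5) == 2
_demo = [0, 5, 7, 10, 15]
insort_left(_demo, 6)
assert _demo == [0, 5, 6, 7, 10, 15]
assert binary_search(_demo, 10) == 4
assert binary_search_by_recursion(_demo, 15, 0, len(_demo) - 1) == 5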
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[int] = ['''keras_nlp''']
def __init__( self : Tuple , *a_ : Optional[Any] , **a_ : Dict ):
requires_backends(self , ["keras_nlp"] )
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
"""simple docstring"""
from math import ceil
def _UpperCAmelCase ( __lowerCamelCase : Tuple = 10_01 ) -> Dict:
_snake_case = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
_snake_case = 2 * i + 1
_snake_case = 2 * i
_snake_case = total + 4 * odd**2 - 6 * even
return total
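
# Sanity check of the ring-sum step above: the four corners of ring i (side
# length 2i + 1) add up to 4 * (2i + 1)^2 - 6 * (2i) = 16i^2 + 4i + 4, so the
# 5 x 5 spiral gives 1 + 24 + 76 = 101 and the default 1001 x 1001 spiral
# gives 669171001.
assert solution(5) == 101
assert solution(1_001) == 669_171_001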
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=None ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
def __init__( self :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict=None ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = obj
__SCREAMING_SNAKE_CASE : str = target
__SCREAMING_SNAKE_CASE : Dict = new
__SCREAMING_SNAKE_CASE : Union[str, Any] = target.split('''.''' )[0]
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : Tuple = attrs or []
def __enter__( self :int ) -> Dict:
*__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
__SCREAMING_SNAKE_CASE : Any = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
__SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
__SCREAMING_SNAKE_CASE : int = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : List[str] = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
__SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('''.'''.join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['''__builtins__'''][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' )
    def __exit__( self , *exc_info ):
        for attr in list(self.original ):
            setattr(self.obj , attr , self.original.pop(attr ) )
    def start( self ):
self.__enter__()
self._active_patches.append(self )
    def stop( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
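# Minimal usage sketch (illustrative; `my_module` is a hypothetical module that does
# `import os` internally). `patch_submodule` swaps `os.path.join` inside that module's
# namespace for the duration of the `with` block and restores it on exit:
#
#   def fake_join(*parts):
#       return "/".join(parts)
#
#   with patch_submodule(my_module, "os.path.join", fake_join):
#       my_module.os.path.join("a", "b")  # -> "a/b"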
| 9 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder( weights , model ):
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
    A__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'layers_{lyr_num}']
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight['''attention''']
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
    A__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[F'layers_{lyr_num}']
        attention_weights = ly_weight['''attention''']
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
'''simple docstring'''
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
    A__ = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=False )
A__ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[F'layers_{lyr_num}']
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight['''self_attention''']
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
A__ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
A__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
A__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
'''simple docstring'''
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint["target"]["decoder"] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args)
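    # Example invocation (illustrative paths):
    #   python convert_music_spectrogram_to_diffusers.py \
    #       --checkpoint_path ./base_with_context/checkpoint_500000 \
    #       --output_path ./spectrogram_diffusion_converted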
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def get_basic_setup( accelerator , num_samples=82 , batch_size=16 ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader( accelerator , use_longest=False ):
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
    dataset = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup( dispatch_batches , split_batches ):
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions( model , dataloader , accelerator ):
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics( accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , targs = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(logits )}'''
def test_mrpc( dispatch_batches = False , split_batches = False ):
    metric = evaluate.load('''glue''' , '''mrpc''' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['''no''']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['''labels'''] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['''labels''']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    accelerator = Accelerator(split_batches=False , dispatch_batches=False )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
            test_mrpc(dispatch_batches , split_batches )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
            test_torch_metrics(accelerator , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
accelerator.state._reset_state()
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
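# This script is meant to be exercised on several processes at once, e.g. (illustrative):
#   accelerate launch --num_processes 2 test_metrics.py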
| 9 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
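# The `_LazyModule` indirection defers the heavy framework imports declared above until an
# attribute such as `AlbertModel` is first accessed, which keeps `import transformers` cheap.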
| 211 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__lowerCAmelCase : Union[str, Any] ={
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions( op , got_ver , want_ver , requirement , pkg , hint ):
if got_ver is None or want_ver is None:
raise ValueError(
F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
F''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
raise ImportError(
F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def require_version( requirement , hint = None ):
    hint = F'''\n{hint}''' if hint is not None else ''''''
    # non-versioned check
    if re.match(R'''^[\w_\-\d]+$''' , requirement ):
        pkg , op , want_ver = requirement, None, None
else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
        pkg , want_full = match[0]
        want_range = want_full.split(''',''' ) # there could be multiple requirements
        wanted = {}
for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , w )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
            op , want_ver = match[0]
            wanted[op] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
        got_ver = '''.'''.join([str(x ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
return
# check if any version is installed
try:
        got_ver = importlib.metadata.version(pkg )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core( requirement ):
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(requirement , hint )
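# Example (illustrative): require_version_core("tokenizers>=0.11.1,!=0.11.3,<0.13") splits the
# range on commas, checks each clause against the installed version via `ops`, and raises
# ImportError with the core hint appended if any clause fails.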
| 9 | 0 |
'''simple docstring'''
from math import factorial
def solution( n = 20 ):
    """simple docstring"""
    n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
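# The count of monotonic lattice paths through an n x n grid is the central binomial
# coefficient C(2n, n); for the default n = 20 this is C(40, 20) = 137846528820.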
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 267 |
from __future__ import annotations
def resistor_parallel( resistors ):
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F'''Resistor at index {index} has a negative or zero value!'''
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors ):
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F'''Resistor at index {index} has a negative value!'''
            raise ValueError(msg )
        index += 1
    return sum_r
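# Quick sanity checks (illustrative): two 2-ohm resistors give
#   resistor_parallel([2, 2]) == 1.0    and    resistor_series([2, 2]) == 4.0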
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class TvltFeatureExtractor ( SequenceFeatureExtractor ):
    model_input_names = ['''audio_values''', '''audio_mask''']
    def __init__( self , spectrogram_length=2048 , num_channels=1 , patch_size=[16, 16] , feature_size=128 , sampling_rate=44100 , hop_length_to_sampling_rate=86 , n_fft=2048 , padding_value=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , ).T
    def _np_extract_fbank_features( self , waveform: np.array ) -> np.ndarray:
        """simple docstring"""
        log_spec = spectrogram(
            waveform , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec
    def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = True , sampling_rate: Optional[int] = None , resample: bool = False , mask_audio: bool = False , **kwargs , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
F""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] , dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float32 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , list ):
            audio_features = [np.asarray(feature , dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(raw_speech ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
# return as BatchFeature
if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data , tensor_type=return_tensors )
return encoded_inputs
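# Example (illustrative): padded log-mel features for one second of silence.
#   extractor = TvltFeatureExtractor()
#   feats = extractor(np.zeros(44100, dtype=np.float32), sampling_rate=44100, return_tensors="np")
#   feats["audio_values"]  # shape (1, 1, max_time_len, 128)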
| 327 |
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['''keras_nlp''']
def __init__( self :Tuple , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Dict:
requires_backends(self , ['''keras_nlp'''] )
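# These dummy objects stand in for the real API when the optional `keras_nlp` backend is
# missing: instantiating one immediately raises an ImportError (via `requires_backends`)
# that tells the user which package to install.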
| 9 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Optional[Any] ) -> Dict:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Union[str, Any] ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : List[str] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[Any] ) -> Optional[int]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Any , *UpperCAmelCase : str , **UpperCAmelCase : str ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : str , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Any , *UpperCAmelCase : List[str] , **UpperCAmelCase : List[str] ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Optional[Any] , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Dict , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Tuple ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class lowerCAmelCase ( metaclass=DummyObject ):
    _backends = ['''flax''', '''transformers''']
def __init__( self : Dict , *UpperCAmelCase : Dict , **UpperCAmelCase : List[str] ) -> Tuple:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : int , **UpperCAmelCase : int ) -> List[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def A_ ( cls : Tuple , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : List[str] ) -> str:
requires_backends(cls , ['flax', 'transformers'] )
| 50 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , num_frames=10 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , crop_size=None , ):
        size = size if size is not None else {'''shortest_edge''': 18}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = VivitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = VivitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , Image.Image )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , np.ndarray )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for video in video_inputs:
            self.assertIsInstance(video , list )
            self.assertIsInstance(video[0] , torch.Tensor )
        # Test not batched input
        encoded_videos = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_videos = image_processing(video_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 9 | 0 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1 , emb_2 , eps=1E-12):
    norm_emb_1 = jnp.divide(emb_1.T , jnp.clip(jnp.linalg.norm(emb_1 , axis=1) , a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T , jnp.clip(jnp.linalg.norm(emb_2 , axis=1) , a_min=eps)).T
    return jnp.matmul(norm_emb_1 , norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule ( nn.Module ):
    '''simple docstring'''
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config )
        self.visual_projection = nn.Dense(self.config.projection_dim , use_bias=False , dtype=self.dtype )
        self.concept_embeds = self.param('concept_embeds' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
        self.special_care_embeds = self.param(
            'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
        self.concept_embeds_weights = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (17,) )
        self.special_care_embeds_weights = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
    def __call__( self , clip_input ):
        pooled_output = self.vision_model(clip_input )[1]
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = jax_cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = jax_cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores , 3 )
        is_special_care = jnp.any(special_scores > 0 , axis=1 , keepdims=True )
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores , 3 )
        has_nsfw_concepts = jnp.any(concept_scores > 0 , axis=1 )
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker ( FlaxPreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    main_input_name = '''clip_input'''
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__( self , config: CLIPConfig , input_shape: Optional[Tuple] = None , seed: int = 0 , dtype: jnp.dtype = jnp.float32 , _do_init: bool = True , **kwargs , ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config , dtype=dtype , **kwargs )
        super().__init__(config , module , input_shape=input_shape , seed=seed , dtype=dtype , _do_init=_do_init )
    def init_weights( self , rng , input_shape , params = None ) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng , input_shape )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        random_params = self.module.init(rngs , clip_input )['''params''']
        return random_params
    def __call__( self , clip_input , params = None , ):
        clip_input = jnp.transpose(clip_input , (0, 2, 3, 1) )
        return self.module.apply(
            {'params': params or self.params} , jnp.array(clip_input , dtype=jnp.float32 ) , rngs={} , )
| 178 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester ( unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent ):
        self.parent = parent
    def prepare_feat_extract_dict( self ):
        return {}
def get_html_strings( ):
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class _lowercase ( FeatureExtractionSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp( self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feat_extract_dict( self ):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call( self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
# fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
# Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
# fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
| 9 | 0 |
"""simple docstring"""
import random
def random_graph (vertices_number , probability , directed = False ):
    graph: dict = {i: [] for i in range(vertices_number )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
        return complete_graph(vertices_number )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i )
return graph
def complete_graph (vertices_number ):
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
}
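# Deterministic example: complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}.
# random_graph(n, p) keeps each of the n * (n - 1) / 2 candidate edges with probability p.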
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 86 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ):
        kwargs['''lower_case'''] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = '''<unk> UNwanted , running'''
        output_text = '''<unk> unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    def test_full_tokenizer_no_lower( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_full_tokenizer_moses_numbers( self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 9 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 159 |
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
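    # Expected output: 0.375 -- the sets share 3 elements ({'c', 'd', 'e'}) out of 8 in their union.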
| 9 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
@slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
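

# A quick self-check sketch for precision_at_10 (hypothetical random data, not part
# of the original module): if the English and Indic sentence vectors are identical,
# each row's zero-distance nearest neighbour is itself, so the metric should be 1.0:
#
#     vecs = np.random.rand(32, 8)
#     assert precision_at_10(vecs, vecs) == 1.0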
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
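

# A minimal usage sketch with the classic "healthy/sick" toy HMM (hypothetical
# numbers, added for illustration; expected output is ["healthy", "healthy", "sick"]):
if __name__ == "__main__":
    example_observations = ["normal", "cold", "dizzy"]
    example_states = ["healthy", "sick"]
    example_start_p = {"healthy": 0.6, "sick": 0.4}
    example_trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    example_emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(example_observations, example_states, example_start_p, example_trans_p, example_emit_p))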
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
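

# A quick shape sanity check for cosine_distance (hypothetical random tensors,
# illustration only): comparing 4 image embeddings against 17 concept embeddings
# of width 8 should yield a (4, 17) similarity matrix:
#
#     sims = cosine_distance(torch.randn(4, 8), torch.randn(17, 8))
#     assert sims.shape == (4, 17)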
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using binary insertion sort."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
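# Example run (hypothetical input), to illustrate the expected behaviour:
#   Enter numbers separated by a comma:
#   5, 2, 9, 1
#   [1, 2, 5, 9]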
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
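# Worked example for euclidean_distance (illustration only): the distance between
# [0, 0] and [3, 4] is sqrt(3**2 + 4**2) = 5.0.
#
#     assert euclidean_distance([0, 0], [3, 4]) == 5.0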
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
        pass
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler style)."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
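# Worked example: factorial(10) = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) should return 27:
#
#     assert solution(10) == 27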
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
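

# A minimal usage sketch (hypothetical values, illustration only): instantiate the
# config with defaults and override a single field.
#
#     config = BertConfig(hidden_dropout_prob=0.2)
#     assert config.vocab_size == 30_522 and config.hidden_dropout_prob == 0.2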
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
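

# A minimal usage sketch (hypothetical file path), assuming the surrounding
# `datasets` package is installed:
#
#     ds = TextDatasetReader("my_corpus.txt", keep_in_memory=True).read()
#     print(ds[0]["text"])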
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
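

# Worked example (illustration only): a box [10, 20, 30, 40] inside a 200x100
# (width x height) image is scaled to the 0-1000 coordinate space used by LayoutLM:
#
#     assert normalize_box([10, 20, 30, 40], 200, 100) == [50, 200, 150, 400]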
def apply_tesseract(image, lang, tesseract_config=None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.'
            )

        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    lavis_model_name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=lavis_model_name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
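# Illustrative invocation (the script filename and output path below are placeholders,
# not taken from this file):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b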
| 9 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATch_SIZE if False else EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
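# Why `pad_to_multiple_of` above: padded sequence lengths that are multiples of 8
# (fp16/bf16) or 16 (fp8) line up with Tensor Core tile sizes. A quick sanity check
# (the `features` batch is illustrative, not from this script):
#
#   batch = tokenizer.pad(features, padding="longest", pad_to_multiple_of=8, return_tensors="pt")
#   assert batch["input_ids"].shape[1] % 8 == 0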
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
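# Typical launch (the flags are the ones this script defines; the filename is assumed
# from the example's purpose and may differ in your checkout):
#
#   accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16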
| 50 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library), based on byte-level
    Byte-Pair-Encoding.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
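# Minimal usage sketch (the checkpoint name comes from the pretrained map above):
#
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tokenizer("Hello world").input_ids
#
# GPT-NeoX ships without a padding token, so set one explicitly (e.g.
# tokenizer.pad_token = tokenizer.eos_token) before padding batches.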
| 9 | 0 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is larger or equal to `item`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is strictly larger than `item`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
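# Quick illustration of the bisect variants above (values chosen for the example):
#
#   >>> data = [0, 5, 7, 10, 15]
#   >>> bisect_left(data, 7)
#   2
#   >>> bisect_right(data, 7)
#   3
#   >>> binary_search(data, 15)
#   4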
| 178 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Returns the accuracy after canonicalizing both inputs."""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 9 | 0 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
if isinstance(lowercase__ , lowercase__ ) and isinstance(lowercase__ , lowercase__ ):
__lowerCAmelCase : List[str] = len(set_a.intersection(lowercase__ ) )
if alternative_union:
__lowerCAmelCase : int = len(lowercase__ ) + len(lowercase__ )
else:
__lowerCAmelCase : int = len(set_a.union(lowercase__ ) )
return intersection / union
if isinstance(lowercase__ , (list, tuple) ) and isinstance(lowercase__ , (list, tuple) ):
__lowerCAmelCase : Dict = [element for element in set_a if element in set_b]
if alternative_union:
__lowerCAmelCase : Optional[int] = len(lowercase__ ) + len(lowercase__ )
return len(lowercase__ ) / union
else:
__lowerCAmelCase : Tuple = set_a + [element for element in set_b if element not in set_a]
return len(lowercase__ ) / len(lowercase__ )
return len(lowercase__ ) / len(lowercase__ )
return None
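# Worked example for the set branch (numbers follow directly from the definition):
# with set_a = {"a","b","c","d","e"} and set_b = {"c","d","e","f","h","i"},
# |intersection| = 3 and |union| = 8, so jaccard_similarity(set_a, set_b) == 0.375;
# with alternative_union=True the denominator is 5 + 6 = 11, giving 3/11.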
if __name__ == "__main__":
lowerCamelCase__ = {'a', 'b', 'c', 'd', 'e'}
lowerCamelCase__ = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b)) | 86 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
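# In a transformers checkout these tests are usually collected with pytest, e.g.
# (the path is assumed from the standard repo layout):
#
#   pytest tests/models/openai/test_modeling_openai.py -k OpenAIGPT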
| 9 | 0 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class __lowerCAmelCase ( A__ ):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformer-cli.

        Args:
            parser: Root parser to register command-specific arguments
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
snake_case_ = self._tf_checkpoint
snake_case_ = ''''''
else:
snake_case_ = self._tf_checkpoint
snake_case_ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase__ , self._config , self._pytorch_dump_output , lowerCAmelCase__ )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 159 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
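# Hedged usage sketch (the function and argument names below are illustrative, not
# from a real diffusers call site): a caller passing a removed `seed` kwarg gets a
# FutureWarning and the value is returned so it can keep being honored until the
# stated version.
#
#   def step(self, generator=None, **kwargs):
#       seed = deprecate("seed", "0.20.0", "Please use `generator` instead.", take_from=kwargs)
#       ...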
| 9 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
 | 341 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is larger or equal to `item`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Locates the first element in a sorted collection that is strictly larger than `item`."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 9 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
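# Effect of the lazy module: a plain
#
#   from transformers.models.swin import SwinConfig
#
# goes through _LazyModule, so the submodules named in _import_structure are only
# imported when one of their attributes is first accessed.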
| 241 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 9 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Solves PV = nRT for the pressure P, given moles n, temperature T and volume V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Solves PV = nRT for the volume V, given moles n, temperature T and pressure P."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
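# Sanity check against standard conditions (numbers rounded for the comment):
# 1 mol at 273.15 K in 0.022414 m^3 gives
#
#   pressure_of_gas_system(1, 273.15, 0.022414)  # about 101_325 Pa, i.e. roughly 1 atm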
if __name__ == "__main__":
from doctest import testmod
testmod()
| 288 |
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _lowercase :
'''simple docstring'''
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str=None ) -> int:
__SCREAMING_SNAKE_CASE : List[str] = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split('.')

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1:]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()['__builtins__']:  # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.')

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
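# A minimal usage sketch (names as reconstructed above): patch "os.path.join"
# as seen from inside a module, without touching the global os module.
# `some_module` is a hypothetical module that does `import os` at its top.
#
#     with patch_submodule(some_module, 'os.path.join', lambda *p: '/'.join(p)):
#         some_module.os.path.join('a', 'b')  # -> 'a/b'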
| 9 | 0 |
class SubArray:
    def __init__(self, arr):
        # convert a comma-separated string of numbers into a list
        self.array = arr.split(',')

    def solve_sub_array(self):
        # Kadane-style DP: sum_value[i] is the best sum of a subarray ending at
        # index i; rear[i] is the best sum seen anywhere in array[: i + 1]
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print('the result is:', re)
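# Worked example: for the input "1,2,-1,2" the best contiguous run is the whole
# array [1, 2, -1, 2] with sum 4, so SubArray('1,2,-1,2').solve_sub_array() == 4.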
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : str = Accelerator(dispatch_batches=lowercase__ , split_batches=lowercase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = get_dataloader(lowercase__ , not dispatch_batches )
__SCREAMING_SNAKE_CASE : List[str] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = accelerator.prepare(lowercase__ , lowercase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : List[str] = []
for batch in dataloader:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = batch.values()
with torch.no_grad():
__SCREAMING_SNAKE_CASE : Dict = model(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowercase__ )
targs.append(lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = torch.cat(lowercase__ ), torch.cat(lowercase__ )
return logits, targs
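# Note: `accelerator.gather_for_metrics` concatenates results from all
# processes and drops the duplicate samples that were padded in to make the
# last batch evenly divisible, so the tensors returned above line up 1:1 with
# the underlying dataset. A rough sanity check, assuming the upstream name
# `generate_predictions` for the helper above (a sketch):
#     logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
#     assert logits.shape[0] == len(dataloader.dataset)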
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=False , lowercase__=False , lowercase__=16 ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = get_basic_setup(lowercase__ , lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = generate_predictions(lowercase__ , lowercase__ , lowercase__ )
assert (
len(lowercase__ ) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowercase__ )}'''
def _UpperCamelCase ( lowercase__ = False , lowercase__ = False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load('''glue''' , '''mrpc''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = get_mrpc_setup(lowercase__ , lowercase__ )
# First do baseline
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = setup['''no''']
model.to(lowercase__ )
model.eval()
for batch in dataloader:
batch.to(lowercase__ )
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : Dict = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowercase__ , references=batch['''labels'''] )
__SCREAMING_SNAKE_CASE : int = metric.compute()
# Then do distributed
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__SCREAMING_SNAKE_CASE : int = model(**lowercase__ )
__SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 )
__SCREAMING_SNAKE_CASE : Any = batch['''labels''']
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowercase__ , references=lowercase__ )
__SCREAMING_SNAKE_CASE : List[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Dict = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(lowercase__ , lowercase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(lowercase__ , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__SCREAMING_SNAKE_CASE : Tuple = Accelerator()
test_torch_metrics(lowercase__ , 512 )
accelerator.state._reset_state()
def _UpperCamelCase ( lowercase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModel.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModel.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForPreTraining.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForPreTraining.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> List[Any]:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_a = TFAutoModelForCausalLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForCausalLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
_a = AutoModelForCausalLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> Dict:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> Tuple:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_a = TFAutoModelForMaskedLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForMaskedLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
_a = AutoModelForMaskedLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> List[str]:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
_a = TFAutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
_a = AutoModelForSeqaSeqLM.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def a__ (self ) -> List[str]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_a = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = TFAutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
_a = AutoModelForQuestionAnswering.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14_410 )
_a = AutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14_410 )
def a__ (self ) -> int:
"""simple docstring"""
_a = TFAutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_pt=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14_410 )
_a = AutoModelWithLMHead.from_pretrained(lowerCAmelCase__ , from_tf=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual(model.num_parameters() , 14_410 )
self.assertEqual(model.num_parameters(only_trainable=lowerCAmelCase__ ) , 14_410 )
| 211 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f'Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'
            f' reinstalling {pkg}.')
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f'{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}')


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f'\n{hint}' if hint is not None else ''
    # non-versioned check
    if re.match(r'^[\w_\-\d]+$', requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)', requirement)
        if not match:
            raise ValueError(
                'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
                f' got {requirement}')
        pkg, want_full = match[0]
        want_range = want_full.split(',')  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r'^([\s!=<>]{1,2})(.+)', w)
            if not match:
                raise ValueError(
                    'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
                    f' but got {requirement}')
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f'{requirement}: need one of {list(ops.keys())}, but got {op}')

    # special case
    if pkg == 'python':
        got_ver = '.'.join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}")

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
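# Illustrative calls (the version numbers here are made up for the example):
#     require_version('numpy>=1.17,<2.0', hint='Try: pip install -U numpy')
#     require_version('python>=3.8')       # handled by the "python" special case
#     require_version_core('datasets>=1.0')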
| 9 | 0 |
'''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # smooth map onto the positive half-line; e.g. squareplus(0) == 1
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
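# Usage sketch for the classes above: project a hidden state to Student-T
# parameters and build the (optionally rescaled) distribution.
#     out = StudentTOutput(dim=1)
#     proj = out.get_parameter_projection(in_features=32)
#     distr = out.distribution(proj(torch.randn(8, 32)))  # batch of 8 Student-T laws
#     sample = distr.sample()                             # shape: (8,)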
| 267 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    # 1 / R_total = 1/R1 + 1/R2 + ... + 1/Rn
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f'Resistor at index {index} has a negative or zero value!'
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    # R_total = R1 + R2 + ... + Rn
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f'Resistor at index {index} has a negative value!'
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
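# Worked examples: two resistors of 2 and 3 ohms.
#     resistor_parallel([2, 3])  # 1 / (1/2 + 1/3) == 1.2
#     resistor_series([2, 3])    # 2 + 3 == 5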
| 9 | 0 |
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {'+', '-', '*', '/'}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # division truncated towards zero, matching C-style integer division
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
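# Example: "2 3 + 4 *" in postfix is (2 + 3) * 4:
#     evaluate_postfix(['2', '3', '+', '4', '*'])  # -> 20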
| 327 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    # NOTE: the class name here is an assumption following the upstream
    # dummy_keras_nlp_objects module; the dummy-object protocol itself
    # (DummyObject metaclass plus _backends) is the standard transformers one.
    _backends = ['keras_nlp']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['keras_nlp'])
| 9 | 0 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
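# The shim keeps old imports working while pointing users at the new class;
# constructing it emits the warning, e.g. (a sketch):
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter('always')
#         DeformableDetrFeatureExtractor()
#     assert any('deprecated' in str(w.message) for w in caught)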
| 50 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=7 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :Optional[int]=18 , lowerCAmelCase__ :Dict=30 , lowerCAmelCase__ :Tuple=400 , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :List[str]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[Any]=None , ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = size if size is not None else {'''shortest_edge''': 18}
__SCREAMING_SNAKE_CASE : Optional[int] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : List[Any] = batch_size
__SCREAMING_SNAKE_CASE : List[str] = num_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_frames
__SCREAMING_SNAKE_CASE : Tuple = image_size
__SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution
__SCREAMING_SNAKE_CASE : Any = max_resolution
__SCREAMING_SNAKE_CASE : List[Any] = do_resize
__SCREAMING_SNAKE_CASE : Optional[Any] = size
__SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
__SCREAMING_SNAKE_CASE : List[Any] = image_mean
__SCREAMING_SNAKE_CASE : List[str] = image_std
__SCREAMING_SNAKE_CASE : str = crop_size
def __magic_name__( self :Tuple ) -> Any:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _lowercase ( A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = VivitImageProcessor if is_vision_available() else None
def __magic_name__( self :List[str] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = VivitImageProcessingTester(self )
@property
def __magic_name__( self :int ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''image_std''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase__ , '''size''' ) )
def __magic_name__( self :Optional[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
__SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __magic_name__( self :List[Any] ) -> Union[str, Any]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
__SCREAMING_SNAKE_CASE : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE : List[str] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :str ) -> int:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Any = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __magic_name__( self :Any ) -> List[str]:
# Initialize image_processing
__SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for video in video_inputs:
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__SCREAMING_SNAKE_CASE : Optional[int] = image_processing(lowerCAmelCase__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 9 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# NOTE: `map` and `filter` deliberately shadow the builtins, mirroring the
# upstream benchmark script.
@get_duration
def map(dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        # NOTE: the dict keys below are illustrative labels for each timed run;
        # the upstream benchmark uses similar human-readable keys.
        times['map identity'] = map(dataset)
        times['map identity batched'] = map(dataset, batched=True)
        times['map no-op batched'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='numpy'):
            times['map no-op batched numpy'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times['map no-op batched pandas'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times['map no-op batched pytorch'] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times['map no-op batched tensorflow'] = map(dataset, function=lambda x: None, batched=True)
        times['map fast-tokenizer batched'] = map(dataset, function=tokenize, batched=True)
        times['filter'] = filter(dataset)
        # Activate later when tokenizer supports batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, 'wb') as f:
            f.write(json.dumps(times).encode('utf-8'))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
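# For reference, `get_duration` (imported from the local `utils` module above)
# is assumed to behave roughly like this sketch, returning the wall-clock time
# of one call:
#     import time, functools
#     def get_duration(func):
#         @functools.wraps(func)
#         def wrapper(*args, **kwargs):
#             start = time.time()
#             func(*args, **kwargs)
#             return time.time() - start
#         return wrapper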
| 178 |
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase ):
    def __init__(self , parent ):
        self.parent = parent

    def prepare_feat_extract_dict(self ):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin , unittest.TestCase ):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self ):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )

    @property
    def feat_extract_dict(self ):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self ):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )

        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
| 9 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = 'decision_transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 86 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self ):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self , **kwargs ):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('<unk> UNwanted , running' )
        self.assertListEqual(tokens , ['<unk>', 'unwanted', ',', 'running'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )

    def test_full_tokenizer_no_lower(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_full_tokenizer_moses(self ):
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token(self ):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 9 | 0 |
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('dog-bucket.png')
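# Note: float16 weights require a CUDA device; for CPU-only inference drop the
# .to('cuda') call and load with torch_dtype=torch.float32 instead.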
| 159 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    # |A intersect B| / |A union B|; with alternative_union=True the
    # denominator is |A| + |B| instead of the size of the union
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
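    # The sets share {'c', 'd', 'e'} out of 8 distinct elements, so this prints 0.375.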
| 9 | 0 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab_tokens = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab = dict(zip(vocab_tokens , range(len(vocab_tokens ) ) ) )
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab ) + '\n' )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write('\n'.join(merges ) )
    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowercase (self ) -> List[Any]:
_snake_case = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowerCAmelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowerCAmelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowercase (self ) -> List[Any]:
_snake_case = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
_snake_case = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__ )
_snake_case = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__ )
_snake_case = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
_snake_case = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase (self ) -> Tuple:
_snake_case = self.get_tokenizer()
_snake_case = '''Encode this sequence.'''
_snake_case = tokenizer.byte_encoder[''' '''.encode("""utf-8""" )[0]]
# Testing encoder arguments
_snake_case = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_snake_case = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
_snake_case = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
_snake_case = '''<mask>'''
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
_snake_case = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
_snake_case = '''Encode <mask> sequence'''
_snake_case = '''Encode <mask>sequence'''
_snake_case = tokenizer.encode(lowerCAmelCase__ )
_snake_case = encoded.index(lowerCAmelCase__ )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_snake_case = tokenizer.encode(lowerCAmelCase__ )
_snake_case = encoded.index(lowerCAmelCase__ )
_snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def lowercase (self ) -> List[Any]:
pass
def lowercase (self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_snake_case = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
_snake_case = '''A, <mask> AllenNLP sentence.'''
_snake_case = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
_snake_case = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
_snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCAmelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self) -> None:
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self) -> None:
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                text = f" {text}"
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                ) | 341 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
'''simple docstring'''
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 0 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """simple docstring"""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    """simple docstring"""
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    """simple docstring"""
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
print_all_state(total_list)
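    # Cross-check against the standard library: the recursive search should
    # agree with itertools.combinations over range(1, n + 1).
    from itertools import combinations

    assert total_list == [list(c) for c in combinations(range(1, n + 1), k)]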
| 241 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    '''simple docstring'''

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
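# Hedged usage sketch (comments only, since this module uses package-relative
# imports); the default `CLIPConfig()` stands in for the real checkpoint config:
#
#     checker = StableDiffusionSafetyChecker(CLIPConfig())
#     clip_input = torch.randn(1, 3, 224, 224)
#     images = torch.randn(1, 3, 512, 512)
#     images, has_nsfw_concepts = checker(clip_input, images)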
| 9 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='''bilinear''', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
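# Hedged usage sketch (comments only; this processor normally ships inside the
# transformers package, and the 300x400 input below is an arbitrary assumption):
#
#     processor = lowerCAmelCase__()
#     image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
#     batch = processor.preprocess(image, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224): shortest edge -> 256, then 224 center crop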
| 288 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
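    # A second illustrative query; the feature values below are made up and the
    # printed label is simply whichever class dominates among the k=5 neighbors.
    print(classifier(X_train, y_train, classes, [6.7, 3.0, 5.2, 2.3]))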
| 9 | 0 |
class PrefixSum:
    """simple docstring"""

    def __init__(self, array: list[int]) -> None:
        '''simple docstring'''
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        '''simple docstring'''
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        '''simple docstring'''
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
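    # Quick usage sketch on a toy array; the values are illustrative only.
    ps = PrefixSum([1, 2, 3, 4])
    print(ps.get_sum(1, 3))    # 2 + 3 + 4 = 9
    print(ps.contains_sum(7))  # True: the slice [3, 4] sums to 7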
| 68 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectron2,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        '''simple docstring'''

        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    '''simple docstring'''

    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            '''document-question-answering''', model=model, tokenizer=tokenizer, image_processor=processor)
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '''''')))
        question = '''What is the placebo?'''
        examples = [
            {
                '''image''': load_image(image),
                '''question''': question,
            },
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {'''score''': ANY(float), '''answer''': ANY(str), '''start''': ANY(int), '''end''': ANY(int)},
                    {'''score''': ANY(float), '''answer''': ANY(str), '''start''': ANY(int), '''end''': ANY(int)},
                ]
            ]
            * 3, )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline('''document-question-answering''', model='''hf-internal-testing/tiny-random-layoutlmv2''')
        image = INVOICE_URL
        question = '''How many cats are there?'''
        expected_output = [
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        outputs = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])
        # We can optionally pass directly the words and bounding boxes
        image = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            '''document-question-answering''', model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''', revision='''9977165''', )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
        outputs = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2, )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            '''document-question-answering''', model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''', revision='''9977165''', max_seq_len=50, )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
        outputs = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2, )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''', revision='''3dc6de3''', add_prefix_space=True)
        dqa_pipeline = pipeline(
            '''document-question-answering''', model='''impira/layoutlm-document-qa''', tokenizer=tokenizer, revision='''3dc6de3''', )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ], )
        outputs = dqa_pipeline({'''image''': image, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ], )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '''''')))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ], )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''', revision='''3dc6de3''', add_prefix_space=True)
        dqa_pipeline = pipeline(
            '''document-question-answering''', model='''impira/layoutlm-document-qa''', tokenizer=tokenizer, revision='''3dc6de3''', max_seq_len=50, )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
        outputs = dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                [
                    {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2, )
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, '''''')))
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
                {'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ], )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            '''document-question-answering''', model='''naver-clova-ix/donut-base-finetuned-docvqa''', tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ), feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''', )
        image = INVOICE_URL
        question = '''What is the invoice number?'''
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{'''answer''': '''us-001'''}])
    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
        pass
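# A hedged end-to-end sketch, separate from the test cases above; it downloads
# model weights and needs Tesseract installed, so it only runs when this file
# is executed directly.
if __name__ == "__main__":
    demo = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    print(demo(image=INVOICE_URL, question="What is the invoice number?", top_k=1))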
| 9 | 0 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status
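# Trial division above only tests divisors up to sqrt(number): in any factor
# pair (a, b) with a * b == number, at least one factor is <= sqrt(number).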
def sieve_er(n):
    """simple docstring"""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def get_prime_numbers(n):
    """simple docstring"""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def prime_factorization(number):
    """simple docstring"""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans
def greatest_prime_factor(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans
def is_even(number):
    """simple docstring"""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    """simple docstring"""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0
def goldbach(number):
    """simple docstring"""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans
def gcd(number1, number2):
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1
def kg_v(number1, number2):
    """simple docstring"""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def get_prime(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans
def get_primes_between(p_number_1, p_number_2):
    """simple docstring"""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """simple docstring"""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator, denominator):
    """simple docstring"""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def factorial(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """simple docstring"""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
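if __name__ == "__main__":
    # Small smoke test over a few helpers; the expected values are standard.
    print(get_prime_numbers(20))      # [2, 3, 5, 7, 11, 13, 17, 19]
    print(goldbach(28))               # [5, 23]
    print(kg_v(24, 36))               # 72
    print(simplify_fraction(10, 20))  # (1, 2)
    print(fib(10))                    # 89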
| 211 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
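# _LazyModule defers the heavy torch-backed imports declared above until an
# attribute such as `ViTMSNModel` is first accessed, keeping package import cheap.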
| 9 | 0 |
'''simple docstring'''
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json',
    },
    'merges_file': {
        'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'Salesforce/codegen-350M-mono': (
            'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'Salesforce/codegen-350M-mono': 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("""add_bos_token""", False):
            model_id = kwargs.pop("""name_or_path""", """""")
            raise ValueError(
                """Currently GPT2\'s fast tokenizer does NOT support adding a BOS token."""
                """Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n"""
                f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'
                f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'
                """This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
                """ so that the fast tokenizer works correctly.""" )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("""add_prefix_space""", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("""type"""))
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("""is_split_into_words""", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        """simple docstring"""
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """simple docstring"""
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
        prints = list(re.finditer("""^print""", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]
        defs = list(re.finditer("""^def""", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]
        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]
        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
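# Standalone illustration of the truncation heuristic in `truncate` above: a
# completion is cut at the second top-level `print` (or `def`), keeping one
# self-contained snippet. Comments only, since this module uses package-relative
# imports:
#
#     sample = "print('a')\nprint('b')\n"
#     prints = list(re.finditer("^print", sample, re.MULTILINE))
#     sample[: prints[1].start()]  # -> "print('a')\n"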
| 267 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    """Reader that builds a `Dataset` (or `IterableDataset`) from text files."""
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
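# Usage sketch (hypothetical file path, for illustration only):
#
#   ds = TextDatasetReader("corpus.txt", split="train", streaming=False).read()
#   # with streaming=True, read() returns an IterableDataset instead of a map-style Dataset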
| 9 | 0 |
def is_automorphic_number(number: int) -> bool:
    """
    Check if a number is automorphic, i.e. whether its square ends in the number itself.
    >>> is_automorphic_number(0)
    True
    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(7)
    False
    >>> is_automorphic_number(-1)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
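# Quick arithmetic check (not in the original file): 5**2 = 25 and 76**2 = 5776 both end
# in the original number, so 5 and 76 are automorphic; 7**2 = 49 is not, since 49 does
# not end in 7.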
if __name__ == "__main__":
import doctest
doctest.testmod()
| 327 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
    image = Image.open(requests.get(url, stream=True).raw).convert('''RGB''')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original LAVIS weights into the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device)
    original_model.eval()
    print("Done!")
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")
    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9,
        repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''')
        hf_model.push_to_hub(F'''nielsr/{model_name}''')
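# Example invocation of this conversion script (hypothetical script/file names, for illustration only):
#
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub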
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
    if "patch_embed" in old_name:
        _, layer, param = old_name.split('.')
        if layer == "0":
            new_name = old_name.replace('0', 'convolution1')
        elif layer == "1":
            new_name = old_name.replace('1', 'batchnorm_before')
        elif layer == "3":
            new_name = old_name.replace('3', 'convolution2')
        else:
            new_name = old_name.replace('4', 'batchnorm_after')
    if "network" in old_name and re.search(r'\d\.\d', old_name):
        two_digit_num = r'\b\d{2}\b'
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r'\d\.\d\d.', old_name).group()
        else:
            match = re.search(r'\d\.\d.', old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, '')
            trimmed_name = trimmed_name.replace('network', match[0] + '.meta4D_layers.blocks.' + match[2:-1])
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match, '')
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network', 'meta4D_layers.blocks.' + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace('network', 'meta3D_layers.blocks.' + layer_index)
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1', 'layernorm1')
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2', 'layernorm2')
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1', 'linear_in')
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2', 'linear_out')
            new_name = 'last_stage.' + trimmed_name
    elif "network" in old_name and re.search(r'.\d.', old_name):
        new_name = old_name.replace('network', 'intermediate_stages')
    if "fc" in new_name:
        new_name = new_name.replace('fc', 'convolution')
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1', 'batchnorm_before')
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2', 'batchnorm_after')
    if "proj" in new_name:
        new_name = new_name.replace('proj', 'projection')
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head', 'distillation_classifier')
    elif "head" in new_name:
        new_name = new_name.replace('head', 'classifier')
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm', 'layernorm')
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name
    return new_name
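# Illustrative input/output pairs for rename_key, hand-derived from the rules above
# (not taken from the original file):
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "dist_head.weight"     -> "distillation_classifier.weight"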
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = '_'.join(checkpoint_path.split('/')[-1].split('.')[0].split('_')[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size}, crop_size={'height': crop_size, 'width': crop_size}, resample=pillow_resamplings['bicubic'], )
    pixel_values = processor(images=image, return_tensors='pt').pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings['bicubic']),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path)
    print(F"""Processor successfully saved at {pytorch_dump_path}""" )
    if push_to_hub:
        print('Pushing model to the hub...' )
        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='Add model', use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""", commit_message='Add image processor', use_temp_dir=True, )
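# Example invocation (hypothetical checkpoint/config file names, for illustration only):
#
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1.pth --config_file efficientformer_l1.json \
#       --pytorch_dump_path ./efficientformer-l1-300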
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--pytorch_model_path""",
default=None,
type=str,
required=True,
help="""Path to EfficientFormer pytorch checkpoint.""",
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The json file for EfficientFormer model config.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
parser.add_argument(
"""--no-push_to_hub""",
dest="""push_to_hub""",
action="""store_false""",
help="""Do not push model and image processor to the hub""",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 50 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'gpt-neox-20b': 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer, backed by HuggingFace `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
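# Usage sketch (standard Transformers fast-tokenizer API, shown for illustration):
#
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello world").input_ids
#   assert tok.decode(ids) == "Hello world"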
| 9 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''')
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''')
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
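# Usage sketch (hypothetical call sites, mirroring how diffusers invokes this helper):
#
#   # warn and pop a deprecated kwarg, returning its value:
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)
#   # plain deprecation warning with nothing to return:
#   deprecate("old_fn", "1.0.0", "Use `new_fn` instead.")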
| 178 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset, with LaTeX canonicalization of inputs."""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ), homepage='''https://github.com/hendrycks/math''', codebase_urls=['''https://github.com/hendrycks/math'''], )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 9 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / 'cache'
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=("train",) ):
assert isinstance(lowercase__ , lowercase__ )
for split in splits:
__lowerCAmelCase : Optional[int] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({'train': jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({'train': jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = 'train'
        path = {'train': jsonl_path, 'test': jsonl_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)
def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, 'keys') and not hasattr(exported_content[0], 'keys')
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp('data') / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, 'rb', compression='infer') as f:
            exported_content = f.read()
        with fsspec.open(original_path, 'rb', compression='infer') as f:
            original_content = f.read()
        assert exported_content == original_content
| 86 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenAIGPTModel,
            'text-classification': OpenAIGPTForSequenceClassification,
            'text-generation': OpenAIGPTLMHeadModel,
            'zero-shot': OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict['input_ids'] = inputs_dict['labels']
                inputs_dict['token_type_ids'] = inputs_dict['labels']
                inputs_dict['mc_token_ids'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict['mc_labels'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 9 | 0 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
# fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
# Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
# fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
self.assertEqual(len(encoding.nodes ) , 2 )
self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 159 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
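# --- Illustrative usage (not part of the module above) ---
# A hedged sketch of retiring a keyword argument with deprecate(). This only
# runs inside the package (the function imports `__version__` from its parent),
# and the argument name and version below are made up for illustration.
def resize(image, size=None, **kwargs):
    scale = deprecate("scale", "99.0.0", "Use `size` instead.", take_from=kwargs)
    if scale is not None and size is None:
        size = scale  # honor the deprecated kwarg until removal
    return size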
| 9 | 0 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2, max_len=128, model_name=MARIAN_MODEL, learning_rate=3e-4, num_train_epochs=10, distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory measurement
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 341 |
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 9 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
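# --- Illustrative usage (not part of the class above) ---
# A minimal sketch; requires torchaudio for the Kaldi fbank computation, and
# the synthetic waveform below stands in for real 16 kHz mono audio.
if __name__ == "__main__":
    demo_waveform = (np.random.randn(16000) * 0.1).astype(np.float32)  # ~1 s of noise
    demo_extractor = Speech2TextFeatureExtractor(feature_size=80, sampling_rate=16000, num_mel_bins=80)
    demo_features = demo_extractor(demo_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    print(demo_features["input_features"].shape)  # (1, num_frames, 80)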
| 241 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
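# --- Illustrative usage (not part of the tests above) ---
# A hedged sketch of driving BarkProcessor outside the test harness; it
# downloads the checkpoint exercised above, so it needs network access.
if __name__ == "__main__":
    demo_processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    demo_inputs = demo_processor(text="This is a test string", voice_preset="en_speaker_1")
    print(sorted(demo_inputs.keys()))  # expected to include 'input_ids' and 'history_prompt'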
| 9 | 0 |
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef

DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
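# --- Illustrative usage (not part of the module above) ---
# A minimal sketch with toy arrays; real callers pass model predictions.
if __name__ == "__main__":
    import numpy as np

    preds = np.array([1, 0, 1, 1])
    labels = np.array([1, 0, 0, 1])
    print(glue_compute_metrics("mrpc", preds, labels))  # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}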
| 288 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module that replaces a module with a number of patched attributes"""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
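# --- Illustrative usage (not part of the module above) ---
# A minimal sketch: patch `os.path.join` as seen from a throwaway module that
# simulates client code doing `import os`.
if __name__ == "__main__":
    import os
    import types

    mymodule = types.ModuleType("mymodule")
    mymodule.os = os  # stand-in for a module that did `import os`
    with patch_submodule(mymodule, "os.path.join", lambda *p: "/patched"):
        assert mymodule.os.path.join("a", "b") == "/patched"
    # on exit, the original module object is restored
    assert mymodule.os.path.join("a", "b") == os.path.join("a", "b")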
| 9 | 0 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
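# --- Illustrative usage (not part of the class above) ---
# TextDatasetReader is normally reached through load_dataset("text", ...);
# a minimal sketch with a throwaway file:
if __name__ == "__main__":
    import os
    import tempfile

    from datasets import load_dataset

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "corpus.txt")
        with open(path, "w") as f:
            f.write("hello\nworld\n")
        ds = load_dataset("text", data_files={"train": path})["train"]
        print(ds["text"])  # ['hello', 'world']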
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
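# --- Illustrative usage (not part of the script above) ---
# The script is written to be launched across processes, e.g. (a sketch; the
# file name is an assumption):
#   accelerate launch --num_processes 2 test_metrics.py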
| 9 | 0 |
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 211 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
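# --- Illustrative usage (not part of the module above) ---
# Minimal sketches; the version pins below are arbitrary examples.
if __name__ == "__main__":
    require_version("python>=3.7")   # special-cased: checks the running interpreter
    require_version("packaging")     # non-versioned check: just "is it installed?"
    require_version_core("packaging>=20.0")  # failure message adds the core install hint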
| 9 | 0 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
    tgt_text = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
"""input_ids""": [[62, 3_034, 2, 250_004]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
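
# A minimal usage sketch with hypothetical resistor values (mirrors the
# doctest style used by the functions above):
#
#     resistor_parallel([3.21389, 2, 3])  # ~0.8737571620498019
#     resistor_series([3.21389, 2, 3])    # 8.21389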
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract the start/end time and the duration (in minutes) of a single job."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
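
# An offline sketch of extract_time_from_single_job with a hypothetical job
# payload (no GitHub API call required):
#
#     job = {"started_at": "2023-01-01T00:00:00Z", "completed_at": "2023-01-01T00:30:00Z"}
#     extract_time_from_single_job(job)
#     # -> {"started_at": ..., "completed_at": ..., "duration": 30}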
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
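
# A tiny standalone sketch of the mixing step above, using hypothetical
# tensors instead of instantiating the full model:
#
#     import torch
#     e0, e1, x = torch.randn(3, 1, 4, 8, 8).unbind(0)  # two residuals + input
#     mix_ratio = 0.5
#     out = (e0 * mix_ratio + e1 * (1 - mix_ratio)) + x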
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
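
# With the lazy module in place, the heavy submodules are imported only on
# first attribute access, e.g. (hypothetical usage):
#
#     from transformers import OwlViTProcessor, OwlViTForObjectDetection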
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
lowerCamelCase__ = True
except (ImportError, ModuleNotFoundError):
lowerCamelCase__ = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __lowerCAmelCase (_UpperCamelCase ):
re.sub('<n>' , '' , lowercase__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase__ ) ) | 86 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    # special case for DoubleHeads model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OpenAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
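
# The alternative_union flag swaps the true union for len(a) + len(b), which
# double-counts the intersection. A hypothetical check:
#
#     jaccard_similarity({"a", "b", "c"}, {"b", "c", "d"})                          # 2/4 = 0.5
#     jaccard_similarity({"a", "b", "c"}, {"b", "c", "d"}, alternative_union=True)  # 2/6 ~ 0.333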
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score

import datasets


_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'

_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'

_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'


def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
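
# Quick sanity check of acc_and_f1 on tiny hypothetical arrays:
#
#     import numpy as np
#     acc_and_f1(np.array([0, 1, 1]), np.array([0, 1, 0]))
#     # -> {"accuracy": 0.666..., "f1": 0.666...}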
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ : List[str] = coefficient_matrix.shape
lowerCAmelCase_ : List[Any] = constant_matrix.shape
if rowsa != colsa:
lowerCAmelCase_ : int = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(lowercase__ )
if colsa != 1:
lowerCAmelCase_ : str = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(lowercase__ )
if rowsa != rowsa:
lowerCAmelCase_ : List[str] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(lowercase__ )
if len(lowercase__ ) != rowsa:
lowerCAmelCase_ : Tuple = (
'''Number of initial values must be equal to number of rows in coefficient '''
f'''matrix but received {len(lowercase__ )} and {rowsa}'''
)
raise ValueError(lowercase__ )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
lowerCAmelCase_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
lowerCAmelCase_ : List[Any] = table.shape
strictly_diagonally_dominant(lowercase__ )
# Iterates the whole matrix for given number of times
for _ in range(lowercase__ ):
lowerCAmelCase_ : Union[str, Any] = []
for row in range(lowercase__ ):
lowerCAmelCase_ : int = 0
for col in range(lowercase__ ):
if col == row:
lowerCAmelCase_ : List[Any] = table[row][col]
elif col == cols - 1:
lowerCAmelCase_ : int = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
lowerCAmelCase_ : str = (temp + val) / denom
new_val.append(lowercase__ )
lowerCAmelCase_ : Optional[Any] = new_val
return [float(lowercase__ ) for i in new_val]
def __lowerCamelCase ( __UpperCamelCase ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = table.shape
lowerCAmelCase_ : Dict = True
for i in range(0 , lowercase__ ):
lowerCAmelCase_ : Optional[Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
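
# Hypothetical usage sketch: solve the strictly diagonally dominant system
#   4x +  y =  2
#    x + 3y = -6
#
#     coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#     constant = np.array([[2.0], [-6.0]])
#     jacobi_iteration_method(coefficient, constant, [0.0, 0.0], iterations=50)
#     # converges toward x = 12/11 ~ 1.0909, y = -26/11 ~ -2.3636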
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
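
# Despite its name, cosine_distance returns pairwise cosine *similarities*.
# A tiny hypothetical sketch:
#
#     a = torch.randn(2, 8)  # 2 image embeddings
#     b = torch.randn(3, 8)  # 3 concept embeddings
#     cosine_distance(a, b).shape  # torch.Size([2, 3])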
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
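
# Hypothetical usage sketch of greedy selection by value/weight ratio:
#
#     food = ["Burger", "Pizza", "Coca Cola", "Rice"]
#     value = [80, 100, 60, 70]
#     weight = [40, 10, 20, 70]
#     foods = build_menu(food, value, weight)
#     greedy(foods, 60, Things.value_weight)
#     # -> items with the best value/weight ratio whose total weight stays <= 60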
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Gives the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(x_train, y_train, classes, point, k=5):
    """Classifies the point using the KNN algorithm."""
    data = zip(x_train, y_train)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
    def test_small_model_tf(self):
pass
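# For reference, a minimal standalone use of the same pipeline (illustrative, outside the test suite):
# dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
# dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)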
| 9 | 0 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively sorts `collection` in place; call with n = len(collection)."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Pushes `collection[index - 1]` rightward until it is in order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
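# Example (illustrative): sort a list in place by calling with n = len(collection).
# nums = [10, 2, 33, 4]
# rec_insertion_sort(nums, len(nums))
# print(nums)  # [2, 4, 10, 33]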
| 211 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
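# Note: with this lazy structure, `from transformers import ViTMSNModel` defers importing
# the PyTorch modeling module (and torch itself) until the attribute is first accessed.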
| 9 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
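# Example (illustrative): the shortest E -> F distance in the sample graphs above is 3,
# via E -> G -> F (2 + 1).
# print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))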
if __name__ == "__main__":
import doctest
doctest.testmod()
| 267 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :int , lowerCAmelCase__ :NestedDataStructureLike[PathLike] , lowerCAmelCase__ :Optional[NamedSplit] = None , lowerCAmelCase__ :Optional[Features] = None , lowerCAmelCase__ :str = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[int] = None , **lowerCAmelCase__ :Optional[int] , ) -> Tuple:
super().__init__(
lowerCAmelCase__ , split=lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : List[str] = path_or_paths if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE : int = Text(
cache_dir=lowerCAmelCase__ , data_files=lowerCAmelCase__ , features=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __magic_name__( self :Dict ) -> Tuple:
# Build iterable dataset
if self.streaming:
__SCREAMING_SNAKE_CASE : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Tuple = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
__SCREAMING_SNAKE_CASE : Optional[int] = self.builder.as_dataset(
split=self.split , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
| 9 | 0 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace relation: c = sqrt(K / rho) for bulk modulus K and density rho.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
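# Example with approximate real-world values for water at 20 °C (illustrative):
# speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9)  # ~1468 m/s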
| 327 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
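# Example invocation (illustrative; the script file name is an assumption):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b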
| 9 | 0 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
| 50 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
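# Usage sketch (downloads the tokenizer files from the Hub):
# tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# print(tok("Hello GPT-NeoX").input_ids)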
| 9 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False

logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """
    Factory function used to instantiate the serving server from provided command line arguments.
    """
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """
    Expose model information
    """

    infos: dict


class ServeTokenizeResult(BaseModel):
    """
    Tokenize result model
    """

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """
    DeTokenize result model
    """

    text: str


class ServeForwardResult(BaseModel):
    """
    Forward result model
    """

    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task",
            type=str,
            choices=get_supported_tasks(),
            help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline

        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]".'
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
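# Example (illustrative): expose a sentiment-analysis pipeline over HTTP on port 8888:
#   transformers-cli serve --task sentiment-analysis --host localhost --port 8888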
| 178 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
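# Usage sketch (mirrors the doctest in _KWARGS_DESCRIPTION above):
# metric = datasets.load_metric("competition_math")
# print(metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]))  # {'accuracy': 1.0}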
| 9 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))

        return inputs
| 86 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Dict = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__SCREAMING_SNAKE_CASE : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Dict = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__( self :Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : str = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : List[str] = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
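# To run just the fast checks in this file (path is illustrative for a transformers checkout):
#   python -m pytest tests/models/openai/test_modeling_openai.py -k "openai_gpt_model"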
| 9 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)


class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(output_dir=args.output_dir, learning_rate=args.learning_rate, lr_scheduler_type=args.lr_scheduler_type, evaluation_strategy="epoch", save_strategy="epoch", logging_strategy="epoch", per_device_train_batch_size=args.batch_size, per_device_eval_batch_size=args.batch_size, num_train_epochs=args.num_epochs, gradient_accumulation_steps=args.gradient_accumulation_steps, weight_decay=0.01, metric_for_best_model="accuracy", run_name="complexity-java", report_to="wandb")

    trainer = Trainer(model=model, args=training_args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["valid"], tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
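# Example invocation (script name and flag values are illustrative):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine --num_epochs 5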
| 159 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
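# Usage sketch (argument names and versions are illustrative):
#   kwargs = {"scale": 0.5}
#   scale = deprecate("scale", "1.0.0", "Use `cross_attention_kwargs` instead.", take_from=kwargs)
#   # emits a FutureWarning and returns 0.5, with "scale" popped from kwargs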
| 9 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
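# Usage sketch for the `patch_submodule` context manager defined below
# (module and replacement are illustrative):
#   import my_module
#   with patch_submodule(my_module, "os.path.join", fake_join):
#       ...  # my_module sees the patched os.path.join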
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other attributes intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 341 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
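# Quick sanity check of the left/right variants:
#   bisect_left([1, 2, 2, 3], 2)   -> 1 (first slot where 2 fits)
#   bisect_right([1, 2, 2, 3], 2)  -> 3 (slot after the existing 2s)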
if __name__ == "__main__":
__lowerCAmelCase : Dict =input('Enter numbers separated by comma:\n').strip()
__lowerCAmelCase : str =sorted(int(item) for item in user_input.split(','))
__lowerCAmelCase : Tuple =int(input('Enter a single number to be found in the list:\n'))
__lowerCAmelCase : Tuple =binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 9 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4096,
'allenai/longformer-large-4096': 4096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
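# Usage sketch (checkpoint taken from the maps above):
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tokenizer.tokenize("Hello world")  # -> ['Hello', 'Ġworld'] with byte-level BPE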
| 241 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
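# Minimal sketch of the processor under test (voice preset name is illustrative):
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor("This is a test string", voice_preset="en_speaker_1")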
| 9 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
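# The fast tests above exercise the pipeline on CPU via onnxruntime. A minimal
# standalone run (checkpoint as in the tests; `low_res_image` is a placeholder PIL image):
#   pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#       "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider")
#   upscaled = pipe(prompt="A fantasy landscape", image=low_res_image).images[0]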
| 288 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other attributes intact."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
| 9 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/unispeech-sat-base-100h-libri-ft": (
        "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1, eos_token_id=2, num_clusters=504, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
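# Usage sketch:
#   config = UniSpeechSatConfig()
#   config.inputs_to_logits_ratio  # -> 320, the product of the default conv strides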
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def _UpperCamelCase ( lowercase__ , lowercase__=82 , lowercase__=16 ):
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = RegressionModel()
__SCREAMING_SNAKE_CASE : Optional[int] = deepcopy(lowercase__ )
__SCREAMING_SNAKE_CASE : Any = RegressionDataset(length=lowercase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DataLoader(lowercase__ , batch_size=lowercase__ )
model.to(accelerator.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ )
return model, ddp_model, dataloader
def _UpperCamelCase ( lowercase__ , lowercase__=False ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__SCREAMING_SNAKE_CASE : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
with accelerator.main_process_first():
__SCREAMING_SNAKE_CASE : Tuple = dataset.map(
lowercase__ , batched=lowercase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__SCREAMING_SNAKE_CASE : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowercase__ ):
if use_longest:
return tokenizer.pad(lowercase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowercase__ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
return DataLoader(lowercase__ , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=16 )
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
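# Minimal illustration of what the checks above exercise (a sketch; run it with
# `accelerate launch`, single-process execution gathers trivially). With a prepared
# dataloader, `gather_for_metrics` drops the samples duplicated to pad the last batch:
#
#     loader = accelerator.prepare(DataLoader(RegressionDataset(length=5), batch_size=2))
#     seen = 0
#     for batch in loader:
#         seen += accelerator.gather_for_metrics(batch["x"]).shape[0]
#     assert seen == 5  # exactly the dataset length, on any number of processes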
| 9 | 0 |
"""X-MOD model configuration."""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an X-MOD model."""

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
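# Usage sketch (names taken from the classes above; the checkpoints are those
# listed in the archive map at the top of the file):
#
#     config = XmodConfig(languages=["en_XX", "de_DE"], adapter_reduction_factor=4)
#     # or load a pretrained configuration:
#     config = XmodConfig.from_pretrained("facebook/xmod-base")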
| 211 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Perform a runtime check of a dependency version, using the same syntax as pip requirements."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
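# Example calls (sketch):
#
#     require_version("tokenizers==0.9.4", hint="pip install tokenizers==0.9.4")
#     require_version("numpy>=1.17,<2.0")   # ranges with multiple constraints work
#     require_version("python>=3.8")        # "python" is special-cased above
#     require_version_core("datasets>=2.0.0")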
| 9 | 0 |
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided dice, Colin rolls six six-sided dice; find the
probability (rounded to seven decimal places) that Peter's total beats Colin's.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Count how many ways each total can be rolled with the given dice."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) out-rolls Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 267 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn); every resistor must be positive."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Req = R1 + R2 + ... + Rn; resistors may not be negative."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
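# Example (sketch): three resistors of 3, 6, and 9 ohms.
#
#     >>> round(resistor_parallel([3.0, 6.0, 9.0]), 4)
#     1.6364
#     >>> resistor_series([3.0, 6.0, 9.0])
#     18.0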
| 9 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
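# End-to-end usage sketch for the processor under test:
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="pt")
#     # yields input_ids / attention_mask for the text queries plus pixel_values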
| 327 |
from ..utils import DummyObject, requires_backends
# Dummy object raised when the `keras_nlp` backend is missing; the class name
# `TFGPT2Tokenizer` is an assumption based on the keras_nlp-backed objects in transformers.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 9 | 0 |
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 50 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
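# Usage sketch for the processor under test (shapes follow the assertions above):
#
#     processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#     video = [np.random.randint(0, 255, (3, 30, 40), dtype=np.uint8) for _ in range(10)]
#     pixel_values = processor(video, return_tensors="pt").pixel_values  # (1, 10, 3, 18, 18)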
| 9 | 0 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4096,
'google/bigbird-roberta-large': 4096,
'google/bigbird-base-trivia-itc': 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>",
                 sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]",
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None,
                spaces_between_special_tokens=True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int],
                                         token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None,
                                already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int],
                                             token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
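# Usage sketch:
#
#     tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#     ids = tokenizer("Hello world").input_ids     # [CLS] ... [SEP] added via build_inputs_with_special_tokens
#     text = tokenizer.decode(ids, skip_special_tokens=True)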
| 178 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
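# Usage sketch mirroring the test above:
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     encoding = feature_extractor("<html><body><h1>Hi</h1></body></html>")
#     encoding.nodes   # [["Hi"]]
#     encoding.xpaths  # [["/html/body/h1"]]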
| 9 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
__lowerCAmelCase : List[str] = self.get_tokenizer()
__lowerCAmelCase : str = self.get_rust_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_image_processor()
__lowerCAmelCase : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : str = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__lowerCAmelCase : int = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
    def test_save_load_pretrained_additional_features(self):
__lowerCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCAmelCase : List[Any] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
__lowerCAmelCase : int = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
    def test_image_processor(self):
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : Dict = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowerCAmelCase : Dict = self.prepare_image_inputs()
__lowerCAmelCase : int = image_processor(lowerCAmelCase__ , return_tensors='np' )
__lowerCAmelCase : Any = processor(images=lowerCAmelCase__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self):
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = self.get_tokenizer()
__lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowerCAmelCase : str = '''lower newer'''
__lowerCAmelCase : List[str] = processor(text=lowerCAmelCase__ )
__lowerCAmelCase : List[str] = tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : List[Any] = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowerCAmelCase : Optional[int] = '''lower newer'''
__lowerCAmelCase : List[Any] = self.prepare_image_inputs()
__lowerCAmelCase : str = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
    def test_tokenizer_decode(self):
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : Optional[Any] = self.get_tokenizer()
__lowerCAmelCase : str = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowerCAmelCase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : Union[str, Any] = processor.batch_decode(lowerCAmelCase__ )
__lowerCAmelCase : Any = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def test_model_input_names(self):
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : int = CLIPProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowerCAmelCase : Optional[Any] = '''lower newer'''
__lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 86 |
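# Usage sketch with a pretrained processor (checkpoint choice is illustrative):
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)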
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
def __magic_name__( self :str ) -> int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
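
# A minimal standalone sketch (not part of the test harness above): build the
# same tiny-vocab tokenizer and check one round trip. This assumes a
# `transformers` version that still ships TransfoXL and a local vocab.txt with
# one token per line, as written by setUp() above.
if __name__ == "__main__":
    demo_tokenizer = TransfoXLTokenizer(vocab_file='''vocab.txt''' , lower_case=True )
    print(demo_tokenizer.tokenize('''<unk> UNwanted , running''' ) )  # ['<unk>', 'unwanted', ',', 'running']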
| 9 | 0 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor( tensors , pad_value , padding_side , sequence_length )->list:
    '''simple docstring'''
    if isinstance(pad_value , tuple ):
        out_tensor = np.full((len(tensors ), sequence_length, 2) , pad_value )
    else:
        out_tensor = np.full((len(tensors ), sequence_length) , pad_value )
    for i, tensor in enumerate(tensors ):
        if padding_side == "right":
            if isinstance(pad_value , tuple ):
                out_tensor[i, : len(tensor[:sequence_length] ), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length] )] = tensor[:sequence_length]
        else:
            if isinstance(pad_value , tuple ):
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length] ) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def is_punctuation( char )->bool:
    '''simple docstring'''
    cp = ord(char )
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char )
    if cat.startswith("P" ):
        return True
    return False
@dataclass
class __lowerCAmelCase ( DataCollatorMixin ):
    """simple docstring"""
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"
    def torch_call( self , features ):
        """simple docstring"""
        import torch
        label_name = '''label''' if '''label''' in features[0].keys() else '''labels'''
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"] ).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label ) + [self.label_pad_token_id] * (sequence_length - len(label )) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label )) + list(label ) for label in labels
            ]
        ner_tags = [feature['''ner_tags'''] for feature in features]
        batch['''ner_tags'''] = padding_tensor(ner_tags , -1 , padding_side , sequence_length )
        original_entity_spans = [feature['''original_entity_spans'''] for feature in features]
        batch['''original_entity_spans'''] = padding_tensor(original_entity_spans , (-1, -1) , padding_side , sequence_length )
        batch = {k: torch.tensor(v , dtype=torch.int64 ) for k, v in batch.items()}
        return batch
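
# Quick illustrative check of padding_tensor on ragged inputs (assumed toy
# values): right-padding two sequences to length 4 with -1.
if __name__ == "__main__":
    print(padding_tensor([[1, 2], [3]] , -1 , "right" , 4 ) )  # [[1, 2, -1, -1], [3, -1, -1, -1]]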
| 159 |
def jaccard_similarity( set_a , set_b , alternative_union=False ):
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            # Deduplicating concatenation stands in for set union on sequences.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
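    # For comparison (assumed toy inputs above): the "alternative" union counts
    # duplicates (|a| + |b| = 11 instead of 8), so the score drops from 3/8 to 3/11.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))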
| 9 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/pegasus-xsum': 512,
}
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__(self , UpperCAmelCase , UpperCAmelCase="<pad>" , UpperCAmelCase="</s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<mask_2>" , UpperCAmelCase="<mask_1>" , UpperCAmelCase=None , UpperCAmelCase=103 , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(lowerCAmelCase__ )}, but is"""
f""" {type(lowerCAmelCase__ )}""" )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(lowerCAmelCase__ ) , self.offset - 1 )
]
if len(set(lowerCAmelCase__ ) ) != len(lowerCAmelCase__ ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token_sent=lowerCAmelCase__ , offset=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
_snake_case = mask_token_sent
_snake_case = vocab_file
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase__ )
# add special tokens to encoder dict
_snake_case = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
_snake_case = {v: k for k, v in self.encoder.items()}
@property
def lowercase (self ) -> int:
return len(self.sp_model ) + self.offset
def lowercase (self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
return state
    def __setstate__(self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def lowercase (self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def lowercase (self , token ) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token )
        return sp_id + self.offset
    def lowercase (self , index ) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset )
            return token
    def lowercase (self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
def lowercase (self , UpperCAmelCase=False ) -> Dict:
return 1
    def _special_token_mask(self , seq ) -> List[int]:
        all_special_ids = set(self.all_special_ids )  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
    def lowercase (self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a )
        elif token_ids_b is None:
            return self._special_token_mask(token_ids_a ) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_b ) + [1]
    def lowercase (self , token_ids_a , token_ids_b=None ) -> List[int]:
        if token_ids_b is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_b + [self.eos_token_id]
    def lowercase (self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
return (out_vocab_file,) | 341 |
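
# A small sketch of the id layout used by the tokenizer above (assumed default
# offset=103): ids 0/1 are pad/eos, 2/3 the two mask tokens, then the <unk_x>
# fillers, and every raw SentencePiece id is shifted up by `offset`.
def sp_id_to_model_id(sp_id: int , offset: int = 103 ) -> int:
    return sp_id + offset

assert sp_id_to_model_id(0 ) == 103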
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION ='\\n    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy( preds , labels ):
    return float((preds == labels).mean() )
def acc_and_fa( preds , labels ):
    acc = simple_accuracy(preds , labels )
    fa = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa( en_sentvecs , in_sentvecs ):
    en_sentvecs = np.array(en_sentvecs )
    in_sentvecs = np.array(in_sentvecs )
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs , axis=0 )
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs , axis=0 )
    sim = cdist(en_sentvecs , in_sentvecs , '''cosine''' )
    actual = np.array(range(n ) )
    preds = sim.argsort(axis=1 )[:, :10]
    matches = np.any(preds == actual[:, None] , axis=1 )
    return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ) -> datasets.MetricInfo:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute( self , predictions , references ):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions , references )}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions , references )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 0 |
"""simple docstring"""
def __lowerCamelCase ( number ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        raise TypeError("Input value must be a 'int' type" )
    if number < 0:
        raise ValueError("Input value must be a positive integer" )
    return bin(number ).count("1" )
if __name__ == "__main__":
import doctest
doctest.testmod()
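
# An equivalent sketch using Brian Kernighan's trick (each `n &= n - 1` clears
# the lowest set bit), shown only for comparison with bin().count("1"):
def count_set_bits_kernighan(n: int ) -> int:
    count = 0
    while n:
        n &= n - 1
        count += 1
    return count

assert count_set_bits_kernighan(0b11001 ) == 3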
| 241 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
def cosine_distance( image_embeds , text_embeds ):
    normalized_image_embeds = nn.functional.normalize(image_embeds )
    normalized_text_embeds = nn.functional.normalize(text_embeds )
    return torch.mm(normalized_image_embeds , normalized_text_embeds.t() )
class _lowercase ( PreTrainedModel ):
    '''simple docstring'''
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']
    def __init__( self , config :CLIPConfig ) -> None:
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )
@torch.no_grad()
    def forward( self , clip_input , images ):
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['''special_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['''concept_scores'''][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )
            result.append(result_img )
        has_nsfw_concepts = [len(res['''bad_concepts'''] ) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx( self , clip_input :torch.FloatTensor , images :torch.FloatTensor ):
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )
        return images, has_nsfw_concepts
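
# Toy check of the cosine similarity helper above (assumed shapes): rows are
# L2-normalized, so the result has shape (2, 3) with values in [-1, 1].
if __name__ == "__main__":
    _a = torch.randn(2 , 8 )
    _b = torch.randn(3 , 8 )
    print(cosine_distance(_a , _b ).shape )  # torch.Size([2, 3])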
| 9 | 0 |
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env( key , default=False ):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''' )
    return _value
_run_slow_tests = parse_flag_from_env('RUN_SLOW', default=False)
def skip( test_case ):
    return unittest.skip('''Test was skipped''' )(test_case )
def slow( test_case ):
    return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(test_case )
def require_cpu( test_case ):
    return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(test_case )
def require_cuda( test_case ):
    return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(test_case )
def require_xpu( test_case ):
    return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(test_case )
def require_mps( test_case ):
    return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(test_case )
def require_huggingface_suite( test_case ):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(test_case )
def require_bnb( test_case ):
    return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(test_case )
def require_tpu( test_case ):
    return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(test_case )
def require_single_gpu( test_case ):
    return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(test_case )
def require_single_xpu( test_case ):
    return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(test_case )
def require_multi_gpu( test_case ):
    return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(test_case )
def require_multi_xpu( test_case ):
    return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(test_case )
def require_safetensors( test_case ):
    return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(test_case )
def require_deepspeed( test_case ):
    return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(test_case )
def require_fsdp( test_case ):
    return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(test_case )
def require_torch_min_version( test_case=None , version=None ):
    if test_case is None:
        return partial(require_torch_min_version , version=version )
    return unittest.skipUnless(is_torch_version('''>=''' , version ) , f'''test requires torch version >= {version}''' )(test_case )
def require_tensorboard( test_case ):
    return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(test_case )
def require_wandb( test_case ):
    return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(test_case )
def require_comet_ml( test_case ):
    return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(test_case )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def require_trackers( test_case ):
    return unittest.skipUnless(
        _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(test_case )
class lowerCAmelCase__ ( unittest.TestCase ):
    clear_on_setup = True
@classmethod
def lowercase ( cls : Optional[Any] ):
        cls.tmpdir = tempfile.mkdtemp()
@classmethod
def lowercase ( cls : List[Any] ):
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowercase ( self : List[Any] ):
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('''**/*''' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowerCAmelCase__ )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : List[str] ):
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class lowerCAmelCase__ ( unittest.TestCase ):
    def lowercase ( self : str , mocks : Union[mock.Mock, List[mock.Mock]] ):
        self.mocks = mocks if isinstance(mocks , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors( tensor ):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class lowerCAmelCase__ :
    def __init__( self , returncode : int , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream( stream , callback ):
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    if echo:
        print('''\nRunning: ''' , ''' '''.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee( line , sink , pipe , label="" ):
        line = line.decode('''utf-8''' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda line : tee(line , out , sys.stdout , label='''stdout:''' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda line : tee(line , err , sys.stderr , label='''stderr:''' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async( cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True ) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ''' '''.join(cmd )
    if result.returncode > 0:
        stderr = '''\n'''.join(result.stderr )
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''' )
    return result
class SubprocessCallException( Exception ):
    pass
def run_command( command , return_stdout=False ):
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , '''decode''' ):
                output = output.decode('''utf-8''' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f'''Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
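
# Assumed quick use of the helpers above (requires a POSIX `echo` on PATH):
if __name__ == "__main__":
    print(parse_flag_from_env('''RUN_SLOW''' , default=False ) )       # False/0 unless RUN_SLOW=yes
    print(run_command(['''echo''' , '''hi'''] , return_stdout=True ) )  # 'hi\n'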
| 288 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( a , b ):
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier( train_data , train_target , classes , point , k=5 ):
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
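    # Optional sketch: score the classifier on the held-out split (assumed names
    # from above; the accuracy varies with the random train/test split).
    correct = sum(
        classifier(X_train, y_train, classes, x) == classes[t] for x, t in zip(X_test, y_test)
    )
    print(f"accuracy: {correct / len(y_test):.2f}")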
| 9 | 0 |
def prime_sieve_eratosthenes( num: int ) -> list:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("Input must be a positive integer" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
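
# Non-interactive sanity check (assumed expected output for n = 30):
assert prime_sieve_eratosthenes(30 ) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]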
| 68 |
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowercase :
'''simple docstring'''
@staticmethod
        def __magic_name__( *args :Union[str, Any] , **kwargs :str ) -> Union[str, Any]:
pass
def _UpperCamelCase ( lowercase__ ):
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase : str =(
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __magic_name__( self :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Any:
__SCREAMING_SNAKE_CASE : Optional[int] = pipeline(
'''document-question-answering''' , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
__SCREAMING_SNAKE_CASE : str = '''What is the placebo?'''
__SCREAMING_SNAKE_CASE : str = [
{
'''image''': load_image(lowerCAmelCase__ ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
{'''score''': ANY(lowerCAmelCase__ ), '''answer''': ANY(lowerCAmelCase__ ), '''start''': ANY(lowerCAmelCase__ ), '''end''': ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
    @require_detectron2
@require_pytesseract
def __magic_name__( self :Dict ) -> List[str]:
__SCREAMING_SNAKE_CASE : Tuple = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : int = '''How many cats are there?'''
__SCREAMING_SNAKE_CASE : Optional[int] = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__SCREAMING_SNAKE_CASE : Any = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def __magic_name__( self :int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
__SCREAMING_SNAKE_CASE : Dict = INVOICE_URL
__SCREAMING_SNAKE_CASE : Any = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Any = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : int = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Tuple = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : int = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : str = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :int ) -> List[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : str = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : str = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=lowerCAmelCase__ , revision='''3dc6de3''' , max_seq_len=50 , )
__SCREAMING_SNAKE_CASE : List[str] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Dict = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
__SCREAMING_SNAKE_CASE : Optional[int] = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
__SCREAMING_SNAKE_CASE : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '''''' ) ) )
# This model should also work if `image` is set to None
__SCREAMING_SNAKE_CASE : List[Any] = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def __magic_name__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : str = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = INVOICE_URL
__SCREAMING_SNAKE_CASE : Optional[int] = '''What is the invoice number?'''
__SCREAMING_SNAKE_CASE : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def __magic_name__( self :Union[str, Any] ) -> Tuple:
pass
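
# Minimal usage sketch of the pipeline exercised above (assumed checkpoint;
# downloads weights on first use and needs network access for INVOICE_URL):
if __name__ == "__main__":
    dqa = pipeline('''document-question-answering''' , model='''impira/layoutlm-document-qa''' )
    print(dqa(image=INVOICE_URL , question='''What is the invoice number?''' , top_k=1 ) )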
| 9 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self ) -> None:
        """simple docstring"""
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''
    def get_tokenizer(self , **kwargs ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown(self ) -> None:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default(self ) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features(self ) -> None:
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings(self ) -> None:
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , '''file.npz''' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer(self ) -> None:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
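
# Standalone sketch (assumed checkpoint and voice preset taken from the test
# constants above; downloads weights on first use):
if __name__ == "__main__":
    processor = BarkProcessor.from_pretrained('''ylacombe/bark-small''' )
    inputs = processor(text='''This is a test string''' , voice_preset='''en_speaker_1''' )
    print(inputs.keys() )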
| 211 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
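
# With the lazy module above, heavyweight torch imports are deferred: accessing
# an attribute triggers the import of the corresponding submodule. Assumed
# usage from an installed transformers build:
#   from transformers import ViTMSNConfig
#   config = ViTMSNConfig()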
| 9 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = '''UNwant\u00E9d,running'''
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : int ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE = {}
for i, token in enumerate(lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE = i
__SCREAMING_SNAKE_CASE = WordpieceTokenizer(vocab=lowerCAmelCase__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , """do_lower_case""" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
        list_of_commun_chinese_char = ['''的''', '''人''', '''有''']
        text_with_chinese_char = ''''''.join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                kwargs['''tokenize_chinese_chars'''] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs['''tokenize_chinese_chars'''] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
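                # concretely, for the text "的人有":
                #   tokenize_chinese_chars=True  -> ["的", "人", "有"]
                #   tokenize_chinese_chars=False -> ["的", "##人", "##有"]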
| 267 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowercase ( AbstractDatasetReader ):
'''simple docstring'''
    def __init__( self , path_or_paths :NestedDataStructureLike[PathLike] , split :Optional[NamedSplit] = None , features :Optional[Features] = None , cache_dir :str = None , keep_in_memory :bool = False , streaming :bool = False , num_proc :Optional[int] = None , **kwargs , ) -> Tuple:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )
def __magic_name__( self :Dict ) -> Tuple:
# Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
return dataset
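# A minimal usage sketch (an assumption: this reader is the one wired up behind the public
# `datasets.load_dataset("text", ...)` entry point; the file path below is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "my_corpus.txt"}, split="train")
#   print(ds[0]["text"])  # each line of the file becomes one "text" example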
| 9 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( model_name , num_frames ):
    text_config = XCLIPTextConfig()
    # derive patch size from model name
    start_idx = model_name.find('patch' )
    patch_size = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
    vision_config = XCLIPVisionConfig(patch_size=patch_size , num_frames=num_frames )
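    # e.g. for "xclip-base-patch32-16-frames", the two characters after "patch" give patch_size == 32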
if "large" in model_name:
snake_case_ : int = 7_68
snake_case_ : Dict = 30_72
snake_case_ : List[Any] = 12
snake_case_ : Any = 10_24
snake_case_ : Union[str, Any] = 40_96
snake_case_ : Union[str, Any] = 16
snake_case_ : List[Any] = 24
snake_case_ : Union[str, Any] = 7_68
snake_case_ : str = 30_72
if model_name == "xclip-large-patch14-16-frames":
snake_case_ : Any = 3_36
    config = XCLIPConfig.from_text_vision_configs(text_config , vision_config )
if "large" in model_name:
snake_case_ : Optional[int] = 7_68
return config
def rename_key( name ):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
    if name == "positional_embedding":
        name = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if name.startswith('transformer.resblocks' ):
        name = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace('attn.out_proj' , 'self_attn.out_proj' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'text_model.final_layer_norm' )
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
    if name == "visual.positional_embedding":
        name = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
    if name.startswith('visual.transformer.resblocks' ):
        name = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
    if "visual.conv1" in name:
        name = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
    if "visual.ln_pre" in name:
        name = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
    if "visual.ln_post" in name:
        name = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
    if "visual.proj" in name:
        name = name.replace('visual.proj' , 'visual_projection.weight' )
    if "text_projection" in name:
        name = name.replace('text_projection' , 'text_projection.weight' )
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
    if "prompts_visual_ln" in name:
        name = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
    # mit
    if name == "mit.positional_embedding":
        name = name.replace('positional' , 'position' )
    if name.startswith('mit.resblocks' ):
        name = name.replace('mit.resblocks' , 'mit.encoder.layers' )
    # prompts generator
    if name.startswith('prompts_generator.norm' ):
        name = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
    return name
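# e.g. rename_key("transformer.resblocks.0.attn.out_proj.weight")
#      -> "text_model.encoder.layers.0.self_attn.out_proj.weight"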
def convert_state_dict( orig_state_dict , config ):
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
if "attn.in_proj" in key:
            key_split = key.split('.' )
if key.startswith('visual' ):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ : int = val[
:dim, :
]
snake_case_ : str = val[
dim : dim * 2, :
]
snake_case_ : str = val[
-dim:, :
]
else:
snake_case_ : str = val[
:dim
]
snake_case_ : Union[str, Any] = val[
dim : dim * 2
]
snake_case_ : Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ : Any = val[
:dim, :
]
snake_case_ : Optional[int] = val[
dim : dim * 2, :
]
snake_case_ : Optional[int] = val[
-dim:, :
]
else:
snake_case_ : Optional[int] = val[:dim]
snake_case_ : Optional[int] = val[
dim : dim * 2
]
snake_case_ : Tuple = val[-dim:]
elif key.startswith('mit' ):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ : Dict = val[:dim, :]
snake_case_ : Union[str, Any] = val[dim : dim * 2, :]
snake_case_ : List[Any] = val[-dim:, :]
else:
snake_case_ : Dict = val[:dim]
snake_case_ : Optional[Any] = val[dim : dim * 2]
snake_case_ : List[str] = val[-dim:]
else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
snake_case_ : Dict = val[:dim, :]
snake_case_ : Optional[Any] = val[
dim : dim * 2, :
]
snake_case_ : Optional[Any] = val[-dim:, :]
else:
snake_case_ : Tuple = val[:dim]
snake_case_ : Any = val[
dim : dim * 2
]
snake_case_ : Any = val[-dim:]
else:
            new_key_name = rename_key(key )
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
return orig_state_dict
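# Note on the "attn.in_proj" handling above: CLIP stores the attention q/k/v projections as
# one fused tensor of shape (3 * dim, dim); slicing rows [:dim], [dim : dim * 2] and [-dim:]
# recovers the separate q, k and v weights (and likewise for the 1-D biases).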
def prepare_video( num_frames ):
    if num_frames == 8:
        filename = '''eating_spaghetti_8_frames.npy'''
    elif num_frames == 16:
        filename = '''eating_spaghetti.npy'''
    elif num_frames == 32:
        filename = '''eating_spaghetti_32_frames.npy'''
    file_path = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video' , filename=filename , repo_type='dataset' , )
    video = np.load(file_path )
    return list(video )
def convert_xclip_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32
    config = get_xclip_config(model_name , num_frames )
    model = XCLIPModel(config )
model.eval()
if "drive" in checkpoint_url:
snake_case_ : Optional[int] = '''pytorch_model.bin'''
gdown.cached_download(lowercase__ , lowercase__ , quiet=lowercase__ )
snake_case_ : Optional[int] = torch.load(lowercase__ , map_location='cpu' )['''model''']
else:
snake_case_ : List[str] = torch.hub.load_state_dict_from_url(lowercase__ )['''model''']
snake_case_ : str = convert_state_dict(lowercase__ , lowercase__ )
snake_case_ : Dict = XCLIPModel(lowercase__ )
snake_case_ : Any = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
    image_size = 3_36 if model_name == '''xclip-large-patch14-16-frames''' else 2_24
    image_processor = VideoMAEImageProcessor(size=image_size )
    slow_tokenizer = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
    fast_tokenizer = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
    processor = XCLIPProcessor(image_processor=image_processor , tokenizer=fast_tokenizer )
    video = prepare_video(num_frames )
    inputs = processor(
        text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=video , return_tensors='pt' , padding=True )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
        outputs = model(**inputs )
# Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1 )
    print('Probs:' , probs )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ : Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ : Dict = torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ : Optional[Any] = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ : Optional[int] = torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ : Any = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ : Union[str, Any] = torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ : Any = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ : Union[str, Any] = torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ : List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ : str = torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ : Dict = torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ : Union[str, Any] = torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ : Any = torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ : Dict = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ : Optional[Any] = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ : List[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ : str = torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ : List[Any] = torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
        model.push_to_hub(model_name , organization='nielsr' )
        processor.push_to_hub(model_name , organization='nielsr' )
        slow_tokenizer.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
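# Example invocation (the script filename is an assumption based on the transformers repo):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 \
#       --push_to_hub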
| 327 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
__SCREAMING_SNAKE_CASE : Dict = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
__SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('''RGB''' )
return image
def create_rename_keys( config ):
__SCREAMING_SNAKE_CASE : List[Any] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
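        # note: BLIP-2's vision encoder keeps biases for q and v but none for k, so the fused
        # qkv bias is rebuilt with an all-zero block in the k position before being stored.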
def get_blipa_config( model_name , eos_token_id ):
    image_size = 364 if '''coco''' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
        if '''opt''' in model_name
        else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
    )
    eos_token_id = tokenizer('''\n''' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
    name , type = model_name_to_original[model_name]
    # load original model
    print('''Loading original model...''' )
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
original_model.eval()
print('''Done!''' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
# some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith('''Qformer.bert''' ):
            key = key.replace('''Qformer.bert''' , '''qformer''' )
        if "attention.self" in key:
            key = key.replace('''self''' , '''attention''' )
        if "opt_proj" in key:
            key = key.replace('''opt_proj''' , '''language_projection''' )
        if "t5_proj" in key:
            key = key.replace('''t5_proj''' , '''language_projection''' )
        if key.startswith('''opt''' ):
            key = key.replace('''opt''' , '''language''' )
        if key.startswith('''t5''' ):
            key = key.replace('''t5''' , '''language''' )
        state_dict[key] = val
# read in qv biases
read_in_q_v_bias(lowercase__ , lowercase__ )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['''eval'''](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={'''height''': image_size, '''width''': image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors='''pt''' ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
original_model.to(lowercase__ )
hf_model.to(lowercase__ )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
else:
# cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
    prompt = ''''''
    input_ids = tokenizer(prompt , return_tensors='''pt''' ).input_ids.to(device )
    original_outputs = original_model.generate({'''image''': original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print('''Original generation:''' , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print('''HF generation:''' , output_text )
if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
__lowerCAmelCase : Tuple =[
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
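# Example invocation (the script filename is an assumption based on the transformers repo):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b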
| 9 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : int ) -> Any:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_script.py'] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_distributed_data_loop.py'] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_ops.py'] )
@require_multi_gpu
def A_ ( self : Optional[int] ) -> Any:
print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self : Optional[int] ) -> Tuple:
print(F"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(F"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self : Tuple ) -> Dict:
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
def A_ ( self : str ) -> int:
print(F"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ['''torchrun''', F"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='0,1' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
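# Concretely, with two processes: rank 0 holds a (2, 10) tensor and rank 1 a (3, 10) tensor,
# so pad_across_processes zero-pads both to (3, 10) before any gather; pad_first=True places
# the zero rows before the original data instead of after it, which is exactly what the
# index arithmetic above verifies.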
| 50 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'tokenizer_file': {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'gpt-neox-20b': 2_0_4_8,
}
class _lowercase ( PreTrainedTokenizerFast ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ) -> Any:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :"Conversation" ) -> List[int]:
__SCREAMING_SNAKE_CASE : Optional[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
if len(lowerCAmelCase__ ) > self.model_max_length:
__SCREAMING_SNAKE_CASE : List[str] = input_ids[-self.model_max_length :]
return input_ids
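# A minimal usage sketch (an assumption: the class above is exported as `GPTNeoXTokenizerFast`,
# its name in the transformers library):
#
#   from transformers import GPTNeoXTokenizerFast
#   tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   ids = tok("Hello world").input_ids
#   print(tok.decode(ids))  # round-trips to "Hello world"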
| 9 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class UpperCamelCase_ ( PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase = '''roberta'''
    def __init__( self , vocab_size=5_02_65 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> List[Any]:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class UpperCamelCase_ ( OnnxConfig ):
'''simple docstring'''
@property
def _UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
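# A minimal usage sketch (an assumption: these classes are exported as `RobertaConfig` and
# `RobertaOnnxConfig`, their names in the transformers library):
#
#   from transformers import RobertaConfig
#   from transformers.models.roberta.configuration_roberta import RobertaOnnxConfig
#   config = RobertaConfig()                 # defaults above: 12 layers, hidden size 768
#   onnx_config = RobertaOnnxConfig(config)
#   print(onnx_config.inputs)                # OrderedDict with dynamic batch/sequence axes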
| 178 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__lowerCAmelCase : Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__lowerCAmelCase : Any ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__lowerCAmelCase : Optional[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Optional[Any] ) -> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    def __magic_name__( self :Any , predictions :Optional[int] , references :Any ) -> Optional[int]:
        n_correct = 0.0
        for i, j in zip(predictions , references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i , j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 9 | 0 |
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCamelCase__ = '\\n Text data.\n Second line of data.'
lowerCamelCase__ = 'file'
@pytest.fixture(scope='session' )
def __lowerCAmelCase (tmp_path_factory ):
    path = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , 'utf-8' )
    with zstd.open(path , 'wb' ) as f:
        f.write(data )
    return path
@pytest.fixture
def __lowerCAmelCase (tmpfs ):
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , 'w' ) as f:
        f.write(FILE_CONTENT )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __lowerCAmelCase (compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __lowerCAmelCase (default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , custom_extracted_dir )
        monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(custom_extracted_path ) )
    expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def __lowerCAmelCase (text_file ):
    # absolute path
    full_path = str(Path(text_file ).resolve() )
    assert cached_path(full_path ) == text_file
    # relative path
    relative_path = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(relative_path ) == text_file
def __lowerCAmelCase (tmp_path ):
    # absolute path
    missing_file = str(tmp_path.resolve() / '__missing_file__.txt' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def __lowerCAmelCase (tmpfs_file ):
    output_file = get_from_cache(F"tmp://{tmpfs_file}" )
    with open(output_file ) as f:
        output_file_content = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def __lowerCAmelCase ():
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def __lowerCAmelCase (tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('https://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def __lowerCAmelCase (tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('ftp://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , True )
def __lowerCAmelCase (tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('s3://huggingface.co' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('s3://huggingface.co' )
 | 86 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
def __magic_name__( self :Optional[Any] ) -> List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __magic_name__( self :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any , *lowerCAmelCase__ :Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
        result = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ )
        result = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        result = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict , *lowerCAmelCase__ :List[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = OpenAIGPTLMHeadModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
        result = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Tuple , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , *lowerCAmelCase__ :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : Any = OpenAIGPTDoubleHeadsModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
        result = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__( self :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str , *lowerCAmelCase__ :Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : List[Any] = OpenAIGPTForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4_735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4_735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40_477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 9 | 0 |
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int = None) -> str:
    """
    Performs the BB84 quantum key distribution protocol and returns the
    generated key as a string of ``key_len`` bits.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")

    from doctest import testmod

    testmod()
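# Illustrative sanity check (assumes qiskit with the Aer provider is installed;
# a fixed seed makes both the basis choices and the simulator deterministic, so
# Alice and Bob derive the same key on every run):
#
#   key = bb84(key_len=16, seed=42)
#   assert len(key) == 16
#   assert set(key) <= {"0", "1"}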
| 159 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union

from packaging import version


def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
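# Usage sketch (hypothetical call site; the argument name, version string and
# message are illustrative):
#
#   def step(self, **kwargs):
#       # Pops `scale` from kwargs, emits a FutureWarning, and returns its value.
#       scale = deprecate("scale", "1.0.0", "Pass `guidance_scale` instead.", take_from=kwargs)
#       # Note: any kwargs still left over afterwards raise a TypeError above.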
| 9 | 0 |
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
__lowerCAmelCase = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
__lowerCAmelCase = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
__lowerCAmelCase = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    pred_label_intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(pred_label_intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
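# Worked example (illustrative): with pred_label = [[1, 1], [0, 2]],
# label = [[1, 0], [0, 2]], num_labels = 3 and ignore_index = 255, one pixel of
# each class matches, so area_intersect = [1, 1, 1], area_union = [2, 2, 1],
# and the per-category IoU computed downstream is [0.5, 0.5, 1.0].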
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 341 |
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 9 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """Calculate vegetation indices (e.g. NDVI, EVI) from reflectance band matrices."""
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
    def arvaa(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
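# Usage sketch (illustrative; the 2x2 reflectance values are made up):
#
#   red = np.array([[0.1, 0.2], [0.3, 0.4]])
#   nir = np.array([[0.5, 0.6], [0.7, 0.8]])
#   cl = IndexCalculation(red=red, nir=nir)
#   ndvi_map = cl.calculation("NDVI")   # elementwise (nir - red) / (nir + red)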
| 241 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
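# Usage sketch (illustrative; downloads the checkpoint from the Hub):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#   # inputs holds the tokenized text plus a "history_prompt" dict with the
#   # semantic/coarse/fine prompts for the chosen speaker.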
| 9 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings that can
    serve as BPE vocabulary symbols, avoiding whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
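# Quick checks of the two helpers above (illustrative):
#
#   assert sorted(get_pairs(("l", "o", "w"))) == [("l", "o"), ("o", "w")]
#   assert len(bytes_to_unicode()) == 256   # a reversible byte <-> unicode map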
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 288 |
from importlib import import_module
from .logging import get_logger
__lowerCAmelCase : str =get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(submodule, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
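# Usage sketch (hypothetical module and replacement; mirrors how the datasets
# library swaps os.path.join for a remote-aware join inside loaded modules):
#
#   import some_dataset_module as module  # hypothetical
#
#   def fake_join(*paths):
#       return "/".join(paths)
#
#   with patch_submodule(module, "os.path.join", fake_join):
#       ...  # inside the block, module.os.path.join is fake_join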
| 9 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 68 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowerCAmelCase : List[str] ='true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 9 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
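# Quick check of the helper above (illustrative): rows are L2-normalized, so
# the result is the cosine similarity between every image/text embedding pair.
#
#   a = torch.tensor([[3.0, 4.0]])
#   b = torch.tensor([[3.0, 4.0], [-4.0, 3.0]])
#   cosine_distance(a, b)   # tensor([[1., 0.]])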
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 211 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def _UpperCamelCase ( lowercase__ , lowercase__ = None ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = F'''\n{hint}''' if hint is not None else ''''''
# non-versioned check
if re.match(R'''^[\w_\-\d]+$''' , lowercase__ ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = requirement, None, None
else:
__SCREAMING_SNAKE_CASE : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
F''' got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : Optional[int] = want_full.split(''',''' ) # there could be multiple requirements
__SCREAMING_SNAKE_CASE : Optional[Any] = {}
for w in want_range:
__SCREAMING_SNAKE_CASE : Any = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , lowercase__ )
if not match:
raise ValueError(
'''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
F''' but got {requirement}''' )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = match[0]
__SCREAMING_SNAKE_CASE : List[Any] = want_ver
if op not in ops:
raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )
# special case
if pkg == "python":
__SCREAMING_SNAKE_CASE : Optional[Any] = '''.'''.join([str(lowercase__ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return
# check if any version is installed
try:
__SCREAMING_SNAKE_CASE : Optional[int] = importlib.metadata.version(lowercase__ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
return require_version(lowercase__ , lowercase__ )
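# A short usage sketch (the package names here are illustrative):
#
#   require_version("numpy")               # only checks that numpy is installed
#   require_version("tokenizers==0.9.4")   # exact pin
#   require_version("datasets>=2.0,<3.0")  # comma-separated range, both bounds checked
#   require_version("python>=3.8")         # special-cased against sys.version_info
#
# On failure these raise ImportError (or PackageNotFoundError) carrying the hint text.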
| 9 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size: int = 7,
        min_resolution: int = 30,
        max_resolution: int = 400,
        num_channels: int = 3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to BridgeTowerImageProcessor,
        assuming do_resize is set to True with a scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
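# A quick worked example of get_expected_values (values chosen for illustration):
# for a 640x480 PIL image with shortest_edge=288 and size_divisor=32, the scale is
# 288 / 480 = 0.6, giving (newh, neww) = (288, 384); neither side exceeds
# int(1333 / 800 * 288) = 479, and both are already multiples of 32, so the
# processor is expected to output a (288, 384) pixel map.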
| 267 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: Req = R1 + R2 + ... + Rn."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
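# Quick sanity check (illustrative values): 6 Ω and 3 Ω in parallel give
# 1 / (1/6 + 1/3) = 2.0 Ω, while in series they give 6 + 3 = 9.0 Ω:
#
#   resistor_parallel([6.0, 3.0])  # -> 2.0
#   resistor_series([6.0, 3.0])    # -> 9.0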
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 0 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH, stored as a string and parsed into a tuple."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 327 |
from ..utils import DummyObject, requires_backends
class TFGPT2LMHeadModel(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
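# Usage note: with the DummyObject metaclass, touching this placeholder class when
# keras_nlp is not installed raises an ImportError naming the missing backend,
# rather than failing later with a confusing AttributeError.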
| 9 | 0 |