| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86 to 54.5k | int64 0 to 371 | stringlengths 87 to 49.2k | int64 0 to 349 | int64 0 to 1 |
'''simple docstring'''
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_sin() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy')

    theta = float(theta)
    # Wrap theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError('maclaurin_cos() requires either an int or float for theta')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy')

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
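# The functions above sum the first `accuracy` terms of the Maclaurin series:
#   sin(x) = sum_{r>=0} (-1)**r * x**(2r + 1) / (2r + 1)!
#   cos(x) = sum_{r>=0} (-1)**r * x**(2r)     / (2r)!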
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
| 181 |
'''simple docstring'''
import argparse
import shlex

import runhouse as rh


if __name__ == "__main__":
    # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
    # setup instructions, if using on-demand hardware
    # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
    # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
    # Throw an error if user passes both BYO and on-demand cluster args
    # Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"])

    # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
    # from my_script... import train
    # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
    # launch_train_gpu = rh.function(fn=train,
    #                                system=gpu,
    #                                reqs=reqs,
    #                                name='train_bert_glue')
    #
    # We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs=3, lr=2e-5, seed=42, batch_size=16,
    #                  stream_logs=True)
| 181 | 1 |
"""simple docstring"""
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : int = 0
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[Any] = {}
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if vertex not in self.adjacency:
lowerCamelCase__ : Optional[Any] = {}
self.num_vertices += 1
def a__ (self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
self.add_vertex(lowerCamelCase_ )
self.add_vertex(lowerCamelCase_ )
if head == tail:
return
lowerCamelCase__ : List[Any] = weight
lowerCamelCase__ : Optional[int] = weight
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = self.get_edges()
for edge in edges:
lowerCamelCase__ : Tuple = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase_ ) ):
lowerCamelCase__ : Optional[Any] = list(edges[i] )
edges.sort(key=lambda lowerCamelCase_ : e[2] )
for i in range(len(lowerCamelCase_ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCamelCase__ : Optional[int] = edges[i][2] + 1
for edge in edges:
lowerCamelCase__ : int = edge
lowerCamelCase__ : Any = weight
lowerCamelCase__ : Any = weight
def __str__(self ):
'''simple docstring'''
lowerCamelCase__ : str = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCamelCase__ : List[Any] = self.adjacency[head][tail]
string += f'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def a__ (self ):
'''simple docstring'''
return self.adjacency.keys()
@staticmethod
def a__ (lowerCamelCase_=None, lowerCamelCase_=None ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = Graph()
if vertices is None:
lowerCamelCase__ : Any = []
if edges is None:
lowerCamelCase__ : Any = []
for vertex in vertices:
g.add_vertex(lowerCamelCase_ )
for edge in edges:
g.add_edge(*lowerCamelCase_ )
return g
class a_ :
'''simple docstring'''
def __init__(self ):
'''simple docstring'''
lowerCamelCase__ : int = {}
lowerCamelCase__ : List[Any] = {}
def __len__(self ):
'''simple docstring'''
return len(self.parent )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if item in self.parent:
return self.find(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] = item
lowerCamelCase__ : Union[str, Any] = 0
return item
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if item not in self.parent:
return self.make_set(lowerCamelCase_ )
if item != self.parent[item]:
lowerCamelCase__ : int = self.find(self.parent[item] )
return self.parent[item]
def a__ (self, lowerCamelCase_, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = self.find(lowerCamelCase_ )
lowerCamelCase__ : Dict = self.find(lowerCamelCase_ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCamelCase__ : Dict = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCamelCase__ : str = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCamelCase__ : Union[str, Any] = roota
return roota
return None
@staticmethod
def a__ (lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : int = graph.num_vertices
lowerCamelCase__ : Optional[Any] = Graph.UnionFind()
lowerCamelCase__ : Optional[Any] = []
while num_components > 1:
lowerCamelCase__ : Optional[int] = {}
for vertex in graph.get_vertices():
lowerCamelCase__ : Union[str, Any] = -1
lowerCamelCase__ : Tuple = graph.get_edges()
for edge in edges:
lowerCamelCase__ : Optional[Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCamelCase__ : List[str] = edge
lowerCamelCase__ : List[str] = union_find.find(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = union_find.find(lowerCamelCase_ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ : Optional[int] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCamelCase__ : Union[str, Any] = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCamelCase__ : Any = cheap_edge[vertex]
if union_find.find(lowerCamelCase_ ) != union_find.find(lowerCamelCase_ ):
union_find.union(lowerCamelCase_, lowerCamelCase_ )
mst_edges.append(cheap_edge[vertex] )
lowerCamelCase__ : Union[str, Any] = num_components - 1
lowerCamelCase__ : Union[str, Any] = Graph.build(edges=lowerCamelCase_ )
return mst
| 350 |
"""simple docstring"""
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
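# How the quine works: `%r` substitutes the repr() of the format string into
# itself, so the printed output is exactly the source line that produced it.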
| 316 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 118 |

import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum

from ...processing_utils import ProcessorMixin


if is_torch_available():
    import torch


class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char "[s]" token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe "#" token
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece "[SEP]" token
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 118 | 1 |
"""simple docstring"""
import math
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float ) -> float:
if (
not isinstance(lowerCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float ) -> float:
if (
not isinstance(lowerCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
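# With apparent power S (volt-amperes) and power factor pf = cos(phi):
#   real power     P = S * pf              (watts)
#   reactive power Q = S * sqrt(1 - pf^2)  (volt-amperes reactive)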
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float ) -> tuple:
lowerCamelCase_ : Optional[Any] =namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 209 | 0 |
'''simple docstring'''


def solution():
    """Counts the Sundays that fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000) -- Project Euler 19."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
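# Known result for Project Euler problem 19: solution() == 171.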
if __name__ == "__main__":
print(solution())
| 164 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch

from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)

        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)

        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1


class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu

            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check

        assert last_step_stats["val_avg_gen_time"] >= 0.01

        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
| 164 | 1 |
'''simple docstring'''
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
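# Iterative DFS avoids Python's recursion limit; runtime is O(V + E) and the
# returned set contains every vertex reachable from `start`.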
G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, 'A'))

| 367 |
'''simple docstring'''
import tensorflow as tf

from ...tf_utils import shape_list


class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight")
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias")

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}")
                self.out_projs.append(proj)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out

| 187 | 0 |
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f"\"{b}\"" for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects.")
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
| 13 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def snake_case_ ( A_ : Any ):
'''simple docstring'''
_lowerCamelCase : Any = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Tuple = emb.weight.shape
_lowerCamelCase : Dict = nn.Linear(A_, A_, bias=A_ )
_lowerCamelCase : str = emb.weight.data
return lin_layer
def snake_case_ ( A_ : str, A_ : Optional[int]="facebook/mbart-large-en-ro", A_ : Union[str, Any]=False, A_ : List[str]=False ):
'''simple docstring'''
_lowerCamelCase : Tuple = torch.load(A_, map_location='''cpu''' )['''model''']
remove_ignore_keys_(A_ )
_lowerCamelCase : int = state_dict['''encoder.embed_tokens.weight'''].shape[0]
_lowerCamelCase : Any = MBartConfig.from_pretrained(A_, vocab_size=A_ )
if mbart_aa and finetuned:
_lowerCamelCase : Any = '''relu'''
_lowerCamelCase : Optional[int] = state_dict['''decoder.embed_tokens.weight''']
_lowerCamelCase : Any = MBartForConditionalGeneration(A_ )
model.model.load_state_dict(A_ )
if finetuned:
_lowerCamelCase : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 72 | 0 |
import numpy


class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))

        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))

        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
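# Note: sigmoid_derivative expects a value that is already a sigmoid output s,
# since d/dx sigmoid(x) = s * (1 - s).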
def example() -> int:
    # Input values (each row is one training sample of three binary features).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 361 |

import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 81 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
return numa ^ numa < 0
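# The XOR of two integers is negative exactly when their sign bits differ,
# e.g. different_signs(1, -1) -> True, different_signs(1, 1) -> False.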
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class snake_case ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Optional[int] ):
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''], model_result['''ss'''] ):
__A = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sgugger/tiny-distilbert-classification'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, only_pretrain_model=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, torchscript=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''', '''Cant do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, fpaa=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
# set architectures equal to `None`
__A = None
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''', '''Can\'t do half precision''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], fpaa=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = '''sshleifer/tinier_bart'''
__A = AutoConfig.from_pretrained(_lowerCamelCase )
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase, configs=[config] )
__A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, save_to_csv=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(_lowerCamelCase, '''inf_time.csv''' ), train_memory_csv_file=os.path.join(_lowerCamelCase, '''train_mem.csv''' ), inference_memory_csv_file=os.path.join(_lowerCamelCase, '''inf_mem.csv''' ), train_time_csv_file=os.path.join(_lowerCamelCase, '''train_time.csv''' ), env_info_csv_file=os.path.join(_lowerCamelCase, '''env.csv''' ), multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''env.csv''' ) ).exists() )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_lowerCamelCase : List[Any] ):
self.assertTrue(hasattr(_lowerCamelCase, '''sequential''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''cumulative''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''current''' ) )
self.assertTrue(hasattr(_lowerCamelCase, '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__A = PyTorchBenchmarkArguments(
models=[MODEL_ID], training=_lowerCamelCase, inference=_lowerCamelCase, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(_lowerCamelCase, '''log.txt''' ), log_print=_lowerCamelCase, trace_memory_line_by_line=_lowerCamelCase, multi_process=_lowerCamelCase, )
__A = PyTorchBenchmark(_lowerCamelCase )
__A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_lowerCamelCase, '''log.txt''' ) ).exists() )
| 266 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # One linear head per distribution parameter.
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function: Callable):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map)
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth positivity mapping; an alternative to softplus.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
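
# Usage sketch (illustrative, not part of the module above): project hidden
# features to Student-T parameters and build the scaled distribution. The
# feature size and batch size below are arbitrary assumptions for the example.
if __name__ == "__main__":
    output = StudentTOutput(dim=1)
    projection = output.get_parameter_projection(in_features=32)
    hidden = torch.randn(4, 32)                       # e.g. a batch of encoder states
    distr_args = projection(hidden)                   # (df, loc, scale), each of shape (4,)
    distr = output.distribution(distr_args, scale=torch.ones(4))
    sample = distr.sample()                           # shape (4,)
    print(sample.shape)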
| 364 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """
    Check whether the module was compiled with torch.compile().
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """
    Extract a model from its distributed containers.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """
    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
    """
    PartialState().wait_for_everyone()


def save(obj, f):
    """
    Save the data to disk. Use in place of `torch.save()`.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that adds each keyword argument to `os.environ` (upper-cased) and removes it on exit.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """
    Gets a pretty name from `obj`.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """
    Recursively merges two dictionaries, writing the values of `source` into `destination`.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """
    Checks whether a port is in use on `localhost`.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
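
# Quick illustration (assumes this runs as a plain script with MASTER_PORT not
# already set): patch_environment scopes environment overrides, and merge_dicts
# deep-merges nested config dictionaries.
if __name__ == "__main__":
    with patch_environment(master_port=29501):
        assert os.environ["MASTER_PORT"] == "29501"
    assert "MASTER_PORT" not in os.environ

    defaults = {"optim": {"lr": 1e-3, "betas": (0.9, 0.999)}}
    overrides = {"optim": {"lr": 5e-4}}
    merged = merge_dicts(overrides, defaults)
    print(merged)  # {'optim': {'lr': 0.0005, 'betas': (0.9, 0.999)}}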
| 259 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    # Shared metadata consumed by both integration tests below.
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
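
# Note: the expected tensors above are character-level lyric token ids preceded
# by artist/genre prefix tokens. A minimal sketch of the call pattern, mirroring
# the tests above (model id as used there):
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     input_ids = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="...")["input_ids"]
#     # input_ids is a list with one tensor per prior level (three levels here).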
| 23 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order peaking EQ biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a second-order high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
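
# Example (illustrative): build a 48 kHz low-pass biquad at 1 kHz and push a few
# samples through it. Assumes IIRFilter exposes a per-sample process() method,
# as in the accompanying audio_filters.iir_filter module.
if __name__ == "__main__":
    lowpass = make_lowpass(frequency=1000, samplerate=48000)
    impulse = [1.0] + [0.0] * 7
    response = [lowpass.process(sample) for sample in impulse]
    print(response)  # first taps of the filter's impulse response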
| 23 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to unicode strings, avoiding the
    whitespace/control characters the BPE code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return the set of symbol pairs in a word, where a word is a tuple of symbols.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
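
# Byte-level BPE in one picture (illustrative): the pure helpers above can be
# exercised without any vocab files.
if __name__ == "__main__":
    byte_encoder = bytes_to_unicode()
    # Every one of the 256 byte values maps to a printable unicode character.
    print(len(byte_encoder))            # 256
    print(byte_encoder[ord(" ")])       # 'Ġ' — the visible stand-in for a space
    print(get_pairs(tuple("hello")))    # {('h','e'), ('e','l'), ('l','l'), ('l','o')}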
| 235 |
import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
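
# Downstream sketch (illustrative, not part of this script): the dumped counts
# feed XLM-style smoothing of MLM masking probabilities, so rarer tokens are
# masked more often. The 0.7 exponent is an assumption chosen for the example.
def smooth_token_probs(counts, smoothing=0.7):
    import numpy as np

    probs = np.maximum(np.array(counts, dtype=np.float64), 1) ** -smoothing
    return probs / probs.sum()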
| 235 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
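
# Standalone illustration of the zero-mean / unit-variance check used above
# (numpy only): the column-wise statistics below are exactly what
# _check_zero_mean_unit_variance asserts on the extractor's output.
if __name__ == "__main__":
    x = np.random.randn(1000, 4) * 3.0 + 5.0
    x_norm = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)
    print(np.abs(x_norm.mean(axis=0)).max())      # ~0
    print(np.abs(x_norm.var(axis=0) - 1).max())   # ~0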
| 325 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        """Instantiate a Mask2FormerConfig from a pre-trained backbone model configuration."""
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
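
# Quick usage sketch: build the default config (Swin backbone) and round-trip
# the nested backbone config through to_dict().
if __name__ == "__main__":
    config = Mask2FormerConfig()
    print(config.backbone_config.model_type)         # "swin"
    print(config.num_queries)                        # 100
    as_dict = config.to_dict()
    print(as_dict["backbone_config"]["model_type"])  # "swin"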
| 325 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
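
# Hypothetical invocation (argument names follow the InitializationArguments
# parsed above; the concrete repo ids are placeholders, not real defaults):
#
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot \
#       --model_name my-codeparrot-model --push_to_hub False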
| 87 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text2 models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
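
# Usage sketch: the attribute_map above lets generic code read standard names.
if __name__ == "__main__":
    config = Speech2Text2Config(vocab_size=12000, decoder_layers=4)
    print(config.hidden_size)           # 256, aliased to d_model
    print(config.num_attention_heads)   # 4, aliased to decoder_attention_heads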
| 87 | 1 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672 ) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811 ) < 1E-2
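# A minimal sketch of the denoising loop the tests above exercise, runnable with
# diffusers + torchsde installed; the zero "noise prediction" below is a stand-in
# assumption for a trained UNet, so only the control flow is meaningful.
if __name__ == "__main__":
    scheduler = DPMSolverSDEScheduler(
        num_train_timesteps=1100, beta_start=0.0_001, beta_end=0.02,
        beta_schedule='linear', noise_sampler_seed=0,
    )
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma  # start from scaled noise
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)  # sigma-dependent input scaling
        noise_pred = torch.zeros_like(model_input)            # dummy model output (assumption)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.abs().mean())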
| 71 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a_ :Any = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
a_ :List[str] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
a_ :List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string', id='token' ), id='sequence' ), id='references' ),
} ), )
    def _compute( self, predictions: List[List[str]], references: List[List[List[str]]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len )
        }
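# The metric above is a thin wrapper over nltk; an equivalent direct call (tiny
# illustrative sentences, not taken from the source) looks like this.
if __name__ == "__main__":
    hypothesis = ['the', 'cat', 'sat', 'down']
    reference = ['the', 'cat', 'sat', 'on', 'the', 'mat']
    # GLEU is the minimum of n-gram precision and n-gram recall over 1..4-grams
    print(gleu_score.corpus_gleu(list_of_references=[[reference]], hypotheses=[hypothesis]))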
| 277 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Any = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : int = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Union[str, Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[int] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Dict = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : int = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : str = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Dict = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Dict = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : str = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Tuple:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Any = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[int] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Union[str, Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : int = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : str = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> str:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[int] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Any:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Union[str, Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[int] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Dict:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Tuple = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> int:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Tuple = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : int = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[str] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Dict = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : Optional[Any] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> str:
requires_backends(self, ['sentencepiece'] )
class lowerCAmelCase_ ( metaclass=a__ ):
UpperCAmelCase__ : List[str] = ["sentencepiece"]
def __init__( self, *SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> str:
requires_backends(self, ['sentencepiece'] )
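# A self-contained sketch of the dummy-object pattern used above: DummyObject is a
# metaclass, so plain class-level attribute access (e.g. .from_pretrained) funnels
# into the backend check, while the __init__ methods above guard instantiation.
# The local reimplementation below is illustrative, not the transformers source.
if __name__ == "__main__":
    class _IllustrativeDummyMeta(type):
        def __getattribute__(cls, key):
            if key.startswith("_"):  # leave dunder/introspection access alone
                return super().__getattribute__(key)
            raise ImportError(f"{cls.__name__} requires the sentencepiece library.")

    class _FakeTokenizer(metaclass=_IllustrativeDummyMeta):
        pass

    try:
        _FakeTokenizer.from_pretrained("some/checkpoint")
    except ImportError as err:
        print(err)  # _FakeTokenizer requires the sentencepiece library.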
| 103 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ) -> Image.Image:
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url , stream=True ).raw ).convert('RGB' )
    return image
def create_rename_keys( config ) -> list:
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def read_in_q_v_bias( state_dict , config ) -> None:
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
        v_bias = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
        # next, set bias in the state dict: qkv bias is (q, zeros for k, v) concatenated
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blipa_config( model_name , eos_token_id=None ) -> tuple:
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> None:
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
    )
    eos_token_id = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
UpperCamelCase : Optional[Any] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCamelCase , UpperCamelCase : Optional[Any] = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCamelCase : List[str] = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCamelCase , UpperCamelCase , UpperCamelCase : Tuple = load_model_and_preprocess(
name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCamelCase : List[Any] = original_model.state_dict()
UpperCamelCase : Tuple = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCamelCase : Optional[Any] = state_dict.pop(snake_case__ )
if key.startswith('Qformer.bert' ):
UpperCamelCase : List[str] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCamelCase : Tuple = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCamelCase : Union[str, Any] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCamelCase : Optional[Any] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCamelCase : Dict = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCamelCase : Dict = key.replace('t5' , 'language' )
UpperCamelCase : Optional[int] = val
# read in qv biases
read_in_q_v_bias(snake_case__ , snake_case__ )
UpperCamelCase , UpperCamelCase : Any = hf_model.load_state_dict(snake_case__ , strict=snake_case__ )
assert len(snake_case__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCamelCase : List[str] = load_demo_image()
UpperCamelCase : str = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
UpperCamelCase : Any = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(snake_case__ )
# create processor
UpperCamelCase : Optional[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=snake_case__ , image_std=snake_case__ )
UpperCamelCase : Any = BlipaProcessor(image_processor=snake_case__ , tokenizer=snake_case__ )
UpperCamelCase : Optional[int] = processor(images=snake_case__ , return_tensors='pt' ).pixel_values.to(snake_case__ )
# make sure processor creates exact same pixel values
assert torch.allclose(snake_case__ , snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "opt" in model_name:
UpperCamelCase : Tuple = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCamelCase : str = hf_model(snake_case__ , snake_case__ ).logits
else:
UpperCamelCase : Tuple = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCamelCase : List[Any] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCamelCase : Optional[int] = hf_model(snake_case__ , snake_case__ , labels=snake_case__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCamelCase : List[str] = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=snake_case__ )
assert torch.allclose(logits[0, :3, :3] , snake_case__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCamelCase : Union[str, Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=snake_case__ )
else:
# cast to same type
UpperCamelCase : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(snake_case__ ) , snake_case__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCamelCase : Optional[int] = ''
UpperCamelCase : Union[str, Any] = tokenizer(snake_case__ , return_tensors='pt' ).input_ids.to(snake_case__ )
UpperCamelCase : str = original_model.generate({'image': original_pixel_values} )
UpperCamelCase : str = hf_model.generate(
snake_case__ , snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , snake_case__ )
UpperCamelCase : Optional[int] = input_ids.shape[1]
UpperCamelCase : Union[str, Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=snake_case__ )
UpperCamelCase : Dict = [text.strip() for text in output_text]
print('HF generation:' , snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
__UpperCAmelCase = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__UpperCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
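# Typical invocation of the conversion script above (the file name and output path
# are illustrative assumptions, not taken from the source):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b \
#       --push_to_hub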
| 103 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
"""simple docstring"""
if attention_mask is None:
__UpperCamelCase = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
__UpperCamelCase = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
__UpperCamelCase = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCamelCase = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotSmallModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=3_2 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
__UpperCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
__UpperCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
__UpperCamelCase = shift_tokens_right(__A , 1 , 2 )
__UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=__A , )
__UpperCamelCase = prepare_blenderbot_inputs_dict(__A , __A , __A )
return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
__UpperCamelCase , __UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_use_cache_forward( self , __A : Dict , __A : Tuple , __A : Optional[int] ):
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(__A )
__UpperCamelCase = model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __A , __A )
__UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __A , decoder_attention_mask=__A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__A , )
__UpperCamelCase = model.decode(__A , __A )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , __A : List[Any] , __A : Union[str, Any] , __A : Any ):
__UpperCamelCase = 2_0
__UpperCamelCase = model_class_name(__A )
__UpperCamelCase = model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , __A , __A )
__UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , __A , decoder_attention_mask=__A , past_key_values=__A , decoder_position_ids=__A , )
__UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , __A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__A , decoder_position_ids=__A , )
__UpperCamelCase = model.decode(__A , __A , decoder_attention_mask=__A )
__UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class FlaxBlenderbotSmallHeadTests ( unittest.TestCase ):
"""simple docstring"""
    vocab_size = 9_9
    def _get_config_and_data( self ):
__UpperCamelCase = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
__UpperCamelCase = input_ids.shape[0]
__UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self : List[str] ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = self._get_config_and_data()
__UpperCamelCase = FlaxBlenderbotSmallForConditionalGeneration(__A )
__UpperCamelCase = lm_model(input_ids=__A )
__UpperCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , __A )
def _lowerCamelCase ( self : Union[str, Any] ):
__UpperCamelCase = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
__UpperCamelCase = FlaxBlenderbotSmallForConditionalGeneration(__A )
__UpperCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
__UpperCamelCase = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
__UpperCamelCase = lm_model(input_ids=__A , decoder_input_ids=__A )
__UpperCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , __A )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
__UpperCamelCase = shift_tokens_right(__A , 1 , 2 )
__UpperCamelCase = np.equal(__A , 1 ).astype(np.floataa ).sum()
__UpperCamelCase = np.equal(__A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
"""simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self : int ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__A , __A , __A )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__A , __A , __A )
def _lowerCamelCase ( self : Optional[Any] ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = self._prepare_for_class(__A , __A )
__UpperCamelCase = model_class(__A )
@jax.jit
def encode_jitted(__A : List[str] , __A : List[str]=None , **__A : Dict ):
return model.encode(input_ids=__A , attention_mask=__A )
with self.subTest('JIT Enabled' ):
__UpperCamelCase = encode_jitted(**__A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase = encode_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) )
for jitted_output, output in zip(__A , __A ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self : str ):
__UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase = model_class(__A )
__UpperCamelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__UpperCamelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(__A : List[Any] , __A : Tuple , __A : List[Any] ):
return model.decode(
decoder_input_ids=__A , decoder_attention_mask=__A , encoder_outputs=__A , )
with self.subTest('JIT Enabled' ):
__UpperCamelCase = decode_jitted(**__A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase = decode_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) )
for jitted_output, output in zip(__A , __A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self : str ):
for model_class_name in self.all_model_classes:
__UpperCamelCase = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
__UpperCamelCase = np.ones((1, 1) ) * model.config.eos_token_id
__UpperCamelCase = model(__A )
self.assertIsNotNone(__A )
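# A concrete look at the shift verified above: every token moves one slot to the
# right and slot 0 receives the decoder_start_token_id (the ids are illustrative).
if __name__ == "__main__":
    demo_ids = np.array([[7_1, 8_2, 1_8, 3_3, 2]], dtype=np.int64 )
    # pad_token_id=1, decoder_start_token_id=2 -> [[ 2 71 82 18 33]]
    print(shift_tokens_right(demo_ids , 1 , 2 ) )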
| 53 |
'''simple docstring'''
import random
def _partition( data : list , pivot ) -> tuple:
    """Three-way partition of data into elements <, ==, and > the pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select( items : list , index : int ):
    """Return the index-th smallest element (0-based), or None if out of range."""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
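# Usage sketch for quick_select above: it finds the index-th smallest element in
# expected linear time via random pivoting (the input list here is illustrative).
if __name__ == "__main__":
    print(quick_select([2, 4, 5, 7, 899, 54, 32] , 2 ) )   # 5, the third-smallest
    print(quick_select([2, 4, 5, 7, 899, 54, 32] , 10 ) )  # None, index out of range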
| 53 | 1 |
'''simple docstring'''
def perfect( number : int ) -> bool:
    """Return True if number equals the sum of its proper divisors."""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input('''Enter number: ''').strip())
print(f'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
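# An equivalent check that only trials divisors up to sqrt(number), adding each
# divisor together with its cofactor; a sketch, not part of the original solution.
def perfect_fast(number: int) -> bool:
    if number < 2:
        return False
    total = 1  # 1 divides every number; number itself is excluded
    i = 2
    while i * i <= number:
        if number % i == 0:
            total += i + (number // i if i * i != number else 0)
        i += 1
    return total == number
if __name__ == "__main__":
    for candidate in (6, 28, 496, 8128, 27):
        assert perfect_fast(candidate) == perfect(candidate)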
| 274 |
'''simple docstring'''
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32' ) ),
'references': datasets.Sequence(datasets.Value('int32' ) ),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None ):
        '''simple docstring'''
        score = f1_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight )
        return {"f1": float(score ) if score.size == 1 else score}
| 274 | 1 |
def solution( ) -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
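# An equivalent answer that never materialises the ~3000-digit sum, reducing each
# self-power modulo 10**10; zfill keeps any leading zeros of the last ten digits.
def solution_modular() -> str:
    modulus = 10**10
    return str(sum(pow(i , i , modulus ) for i in range(1 , 1001 )) % modulus ).zfill(10 )
if __name__ == "__main__":
    assert solution_modular() == solution()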
| 142 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : Dict = logging.get_logger(__name__)
_A : Union[str, Any] = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    model_type = "vit_msn"
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-06 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , qkv_bias=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
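# Instantiation sketch for the config above: defaults reproduce the ViT-MSN base
# hyperparameters, and any keyword argument overrides them (384 is illustrative).
if __name__ == "__main__":
    cfg = __SCREAMING_SNAKE_CASE(image_size=3_8_4 )
    print(cfg.model_type , cfg.hidden_size , cfg.image_size )  # vit_msn 768 384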
| 142 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
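# Because of the _LazyModule indirection above, callers import names as usual; the
# torch-backed implementation module is only loaded when the attribute first resolves.
if __name__ == "__main__":
    from transformers import BigBirdPegasusConfig  # resolved lazily at first access
    print(BigBirdPegasusConfig().model_type )  # bigbird_pegasus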
| 103 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set the fused qkv bias in the state dict (k bias is fixed at zero)
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
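
# A minimal sketch of the qkv-bias layout that read_in_q_v_bias above produces:
# the original ViT attention keeps a fused qkv projection whose key bias is
# fixed at zero, so the converted bias is (q_bias, zeros, v_bias). The tensor
# sizes here are made up for the demo.
def _demo_qkv_bias() -> None:
    q_bias = torch.ones(4)
    v_bias = torch.full((4,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
    assert qkv_bias.shape == (12,)
    assert torch.equal(qkv_bias[4:8], torch.zeros(4))  # key bias slot is all zeros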
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
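
    # Example invocation (script filename and output path are illustrative):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted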
| 103 | 1 |
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 10_00,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 10_00,
"block_out_channels": [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 2_56,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 2_01,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 1_51,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
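
# Quick sketch of the fused-qkv split that convert_attention performs: a
# projection stacked as (3 * dim, dim) chunks into equal q, k and v blocks
# along dim 0. The sizes below are made up for the demo.
def _demo_qkv_chunk() -> None:
    qkv = torch.arange(24.0).reshape(6, 4)  # stacked q, k, v, each (2, 4)
    q, k, v = qkv.chunk(3, dim=0)
    assert q.shape == k.shape == v.shape == (2, 4)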
def con_pt_to_diffuser(checkpoint_path: str, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()

    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
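
    # Example invocation and downstream use (paths are illustrative):
    #   python convert_consistency_to_diffusers.py --unet_path cd_imagenet64_l2.pt \
    #       --dump_path ./consistency-model --class_cond True
    # The saved pipeline can then be reloaded with
    # ConsistencyModelPipeline.from_pretrained("./consistency-model").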
| 299 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[::-1]  # call infix_2_postfix on Infix, return reverse of Postfix
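

# Worked example: for the infix expression "a+b*c" the shunting loop above
# produces the postfix form "abc*+"; reversing the input (with brackets
# swapped) and reversing the resulting postfix yields the prefix form "+a*bc".
# Note that calling this prints the intermediate trace tables as a side effect.
def _demo() -> None:
    assert infix_2_prefix("a+b*c") == "+a*bc"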
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 299 | 1 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search for the smallest index in v[l..r] whose value is >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling value in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
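

# A quick sanity check on the classic example sequence: the longest strictly
# increasing subsequence of [2, 5, 3, 7, 11, 8, 10, 13, 6] is
# [2, 3, 7, 8, 10, 13], so the expected length is 6.
def _demo() -> None:
    sample = [2, 5, 3, 7, 11, 8, 10, 13, 6]
    assert longest_increasing_subsequence_length(sample) == 6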
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 135 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function of n, computed from its prime factorization."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
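

# Worked examples, assuming prime_factors returns factors with multiplicity
# (e.g. prime_factors(24) == [2, 2, 2, 3]) and is_square_free checks that list:
def _demo() -> None:
    assert mobius(24) == 0  # divisible by the square 2**2
    assert mobius(10) == 1  # 10 = 2 * 5: an even number of prime factors
    assert mobius(7) == -1  # a single prime factor: odd count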
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 135 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """DO NOT CHANGE. This function normalizes the target text."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
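

# Example invocation (model and dataset identifiers are illustrative):
#   python eval.py --model_id hf-test/xls-r-dummy \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs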
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 212 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a LLaMA model."""

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 108 | 0 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force, charge1, charge2, distance):
    """
    Apply Coulomb's law F = k * q1 * q2 / d**2 and solve for whichever of the
    four quantities is passed in as zero.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 370 |
'''simple docstring'''
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring that appears in both text1 and text2."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] is the length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
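

# Quick check: "abcdef" and "xbcdy" share the substring "bcd".
def _demo() -> None:
    assert longest_common_substring("abcdef", "xbcdy") == "bcd"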
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 220 | 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 36 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 17 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 329 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 329 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
| 43 |
"""simple docstring"""
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
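

# A minimal usage sketch of the list above: insert a few values, then
# iterate, test membership and delete an interior node.
def _demo() -> None:
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    assert str(linked_list) == "1 2 3"
    linked_list.delete_value(2)
    assert str(linked_list) == "1 3"
    assert 3 in linked_list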
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 290 | 0 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder( weights , model ):
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
    lowerCAmelCase__ : Dict = nn.Parameter(
        torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'layers_{lyr_num}']
lowerCAmelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
lowerCAmelCase__ : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
        attention_weights = ly_weight['''self_attention''']
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
        attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
lowerCAmelCase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
lowerCAmelCase__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
lowerCAmelCase__ : int = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
lowerCAmelCase__ : int = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
lowerCAmelCase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
lowerCAmelCase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
lowerCAmelCase__ : List[Any] = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main( args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        '''from __gin__ import dynamic_registration''',
        '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
        '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
        '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
    ]
    gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
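An aside on the conversion above: Flax/T5X store Dense kernels as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features), which is why every kernel above is transposed with .T. A small illustration (shapes and names here are illustrative only):
# Flax kernel: (in, out); torch Linear weight: (out, in) -- hence the .T.
import numpy as onp
import torch
import torch.nn as nn

flax_kernel = onp.ones((16, 32), dtype=onp.float32)  # (in_features, out_features)
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))  # (out, in)
assert linear.weight.shape == (32, 16)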
| 74 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
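A minimal sketch of the same lazy-import idea using PEP 562 module-level __getattr__. The real _LazyModule also handles caching, dir(), and module specs, so this is illustrative only:
import importlib

_imports = {'processing_clipseg': ['CLIPSegProcessor']}  # submodule -> public names

def __getattr__(name):  # PEP 562: called only for names not found normally
    for module_name, names in _imports.items():
        if name in names:
            module = importlib.import_module('.' + module_name, __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')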
| 74 | 1 |
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c,
# then the Casimir force function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force( force: float , area: float , distance: float ) -> dict[str, float]:
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
        distance = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
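A quick usage check of the function above: setting exactly one argument to 0 asks it to solve for that quantity. For two 4 cm^2 plates one micrometre apart, the attractive force is about 5.2e-7 N (the expected value below is approximate):
result = casimir_force(force=0, area=4e-4, distance=1e-6)
assert abs(result['force'] - 5.2e-7) / 5.2e-7 < 0.05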
| 300 |
def heaps(arr: list ) -> list:
    '''Generate all permutations of ``arr`` with Heap's iterative algorithm.'''
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate(n: int , arr: list ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0] , arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]] , arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    print(heaps(arr))
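Heap's algorithm emits each permutation exactly once, so an input of length n yields n! tuples; a quick sanity check:
perms = heaps([1, 2, 3])
assert len(perms) == 6 and len(set(perms)) == 6  # 3! distinct permutations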
| 300 | 1 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester( object ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=64 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , q_groups=2 , k_groups=2 , v_groups=2 , post_attention_groups=2 , intermediate_groups=4 , output_groups=1 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
def A_ ( self : int ) -> List[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : List[str] ) -> Union[str, Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def A_ ( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple ) -> Union[str, Any]:
lowerCamelCase__ : str = SqueezeBertModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Tuple = model(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : int = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> List[str]:
lowerCamelCase__ : List[Any] = SqueezeBertForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : str ) -> str:
lowerCamelCase__ : int = SqueezeBertForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : int = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ) -> Any:
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Any = SqueezeBertForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : int = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self.num_labels
lowerCamelCase__ : int = SqueezeBertForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : str ) -> Dict:
lowerCamelCase__ : Union[str, Any] = self.num_choices
lowerCamelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCamelCase__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase__ : List[str] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Any ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
UpperCAmelCase__ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCAmelCase__ = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = False
    def setUp( self ):
        self.model_tester = SqueezeBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=SqueezeBertConfig , dim=37 )
def A_ ( self : Tuple ) -> Optional[int]:
self.config_tester.run_common_tests()
def A_ ( self : List[Any] ) -> Optional[int]:
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> List[Any]:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase )
def A_ ( self : int ) -> Optional[Any]:
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase )
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase )
@slow
def A_ ( self : Any ) -> Any:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Dict = SqueezeBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest( unittest.TestCase ):
@slow
def A_ ( self : Any ) -> Tuple:
lowerCamelCase__ : Optional[int] = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCamelCase__ : Optional[int] = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] )
lowerCamelCase__ : Any = model(UpperCAmelCase )[0]
lowerCamelCase__ : Dict = torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCAmelCase )
lowerCamelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-4 ) )
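For reference, a plausible sketch of the ids_tensor and random_attention_mask helpers these tests import (the real implementations live in the shared test utilities; this version is illustrative only):
import torch

def ids_tensor(shape, vocab_size):
    """Random integer ids in [0, vocab_size), matching how the tests above use it."""
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1  # guarantee at least one attended token per row
    return mask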
| 45 |
def twos_complement( number: int ) -> str:
    '''Return the two's complement binary string of a non-positive integer.'''
    if number > 0:
        raise ValueError('input must be a negative integer' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
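A worked example of the function above: for -5, bin(-5)[3:] == '101' gives a 3-bit magnitude, so the result is sign-extended to 4 bits.
# -5 in 4-bit two's complement is 1011; -1 in 2-bit two's complement is 11.
assert twos_complement(-5) == '0b1011'
assert twos_complement(-1) == '0b11'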
| 45 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = 'google/mobilebert-uncased'
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
        self.tokenizers_list = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __lowercase ( self : Dict ,_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Any ):
_a : List[str] = self.tokenizer_class(self.vocab_file )
_a : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[9, 6, 7, 12, 10, 11] )
def __lowercase ( self : Dict ):
if not self.test_rust_tokenizer:
return
_a : Dict = self.get_tokenizer()
_a : Optional[Any] = self.get_rust_tokenizer()
_a : List[Any] = 'UNwant\u00E9d,running'
_a : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : str = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Any = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : List[str] = self.get_rust_tokenizer()
_a : Tuple = tokenizer.encode(_UpperCAmelCase )
_a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
# With lower casing
_a : List[str] = self.get_tokenizer(do_lower_case=_UpperCAmelCase )
_a : Tuple = self.get_rust_tokenizer(do_lower_case=_UpperCAmelCase )
_a : int = 'UNwant\u00E9d,running'
_a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : int = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : List[str] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : str = self.get_rust_tokenizer()
_a : Tuple = tokenizer.encode(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a : Any = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def __lowercase ( self : Tuple ):
_a : str = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : Optional[Any] ):
_a : Optional[int] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def __lowercase ( self : Any ):
_a : str = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : str ):
_a : List[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def __lowercase ( self : Union[str, Any] ):
_a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : Optional[Any] ):
_a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : List[str] ):
_a : Any = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowercase ( self : List[str] ):
_a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowercase ( self : Optional[Any] ):
_a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
_a : Dict = {}
for i, token in enumerate(_UpperCAmelCase ):
_a : Tuple = i
_a : Union[str, Any] = WordpieceTokenizer(vocab=_UpperCAmelCase ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def __lowercase ( self : Optional[Any] ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowercase ( self : int ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowercase ( self : str ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __lowercase ( self : int ):
_a : List[str] = self.get_tokenizer()
_a : int = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def __lowercase ( self : Dict ):
_a : List[Any] = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
_a : Any = tokenizer.encode('sequence builders' ,add_special_tokens=_UpperCAmelCase )
_a : Tuple = tokenizer.encode('multi-sequence build' ,add_special_tokens=_UpperCAmelCase )
_a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
_a : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ,_UpperCAmelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __lowercase ( self : Optional[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : int = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Union[str, Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_a : List[str] = tokenizer_r.encode_plus(
_UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,)
_a : Optional[int] = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase ,'do_lower_case' ) else False
_a : Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def __lowercase ( self : Optional[Any] ):
_a : List[str] = ['的', '人', '有']
_a : str = ''.join(_UpperCAmelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_a : Optional[int] = True
_a : Dict = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Optional[int] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : int = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : int = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_a : Dict = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Any = False
_a : Tuple = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
_a : Union[str, Any] = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : str = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase )
_a : List[str] = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_a : int = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase )
]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
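The behaviour the WordPiece tests above pin down is greedy longest-match-first subword tokenization; a compact illustrative sketch (not the transformers implementation):
def wordpiece(word, vocab, unk='[UNK]'):
    """Greedy longest-match-first WordPiece over a single word."""
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # no piece matched: the whole word becomes [UNK]
        tokens.append(cur)
        start = end
    return tokens

vocab = {'un', '##want', '##ed', 'runn', '##ing'}
assert wordpiece('unwanted', vocab) == ['un', '##want', '##ed']
assert wordpiece('unwantedX', vocab) == ['[UNK]']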
| 89 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a , start , end ):
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p , count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition(a , start , end ):
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu , sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    """No of Comparisons for 100 elements selected from a standard normal distribution"""
    """ is :"""
)
print(z)
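For context on the printed count: randomized quicksort performs roughly 2·n·ln(n) comparisons on average, so about 921 for n = 100; a quick estimate:
import math

n = 100
expected = 2 * n * math.log(n)  # average-case comparison count for randomized quicksort
print(f"expected comparisons for n={n}: ~{expected:.0f}")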
| 92 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size , overlap_pixels , remove_borders=[] ):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 255
    mask = np.pad(mask , mode="""linear_ramp""" , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n , smallest , largest ):
    return max(smallest , min(n , largest ) )
def clamp_rect(rect , min , max ):
    return (
        clamp(rect[0] , min[0] , max[0] ),
        clamp(rect[1] , min[1] , max[1] ),
        clamp(rect[2] , min[0] , max[0] ),
        clamp(rect[3] , min[1] , max[1] ),
    )
def add_overlap_rect(rect , overlap , image_size ):
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile(tile , original_image , original_slice , slice_x ):
    result = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile(tile , original_image_slice ):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def next_divisible(n , d ):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , low_res_scheduler , scheduler , max_noise_level=3_50 ):
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ):
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect , tile_border , image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0 , translated_slice_x )
        to_input = squeeze_tile(tile , image , original_image_slice , translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile , original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append("""l""" )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("""r""" )
        if y == 0:
            remove_borders.append("""t""" )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("""b""" )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=remove_borders ) , mode="""L""" , )
        final_image.paste(
            upscaled_tile , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , transparency_mask )
    @torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps=75 , guidance_scale=9.0 , noise_level=50 , negative_prompt=None , num_images_per_prompt=1 , eta=0.0 , generator=None , latents=None , callback=None , callback_steps=1 , tile_size=1_28 , tile_border=32 , original_image_slice=32 ):
        final_image = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
                current_count += 1
                if callback is not None:
                    callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
        return final_image
def main() -> None:
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision="""fp16""" , torch_dtype=torch.float16 )
    pipe = pipe.to("""cuda""" )
    image = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
    def callback(obj ):
        print(f'progress: {obj["progress"]:.4f}' )
        obj["image"].save("""diffusers_library_progress.jpg""" )
    final_image = pipe(image=image , prompt="""Black font, white background, vector""" , noise_level=40 , callback=callback )
    final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
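An illustration of the feathering trick make_transparency_mask relies on: np.pad with mode='linear_ramp' fades an opaque tile mask to zero across the overlap, so pasted tiles blend instead of leaving visible seams.
import numpy as np

overlap = 4
core = np.ones((8, 8), dtype=np.uint8) * 255  # fully opaque tile interior
mask = np.pad(core, pad_width=overlap, mode='linear_ramp', end_values=0)
assert mask.shape == (16, 16) and mask[0, 0] == 0 and mask[8, 8] == 255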
| 356 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self ):
super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""" )
@cached_property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""" )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,max_length=len(__SCREAMING_SNAKE_CASE ),padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual((2, 9),batch.input_ids.shape )
self.assertEqual((2, 9),batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# Test that special tokens are reset
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""",__SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""",__SCREAMING_SNAKE_CASE )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,max_length=32,padding="""max_length""",return_tensors="""pt""" )
self.assertEqual(32,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""],padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape,(2, 10_24) )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,text_target=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ),sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ),sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ),)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 46 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Union[str, Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig( PretrainedConfig ):
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self , vocab_size=32_128 , d_model=768 , d_kv=64 , d_ff=2_048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , router_z_loss_coef=0.0_01 , router_aux_loss_coef=0.0_01 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # Same for the decoder.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('''-''')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
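An example of the sparse-layer spacing computed in the config above (assuming the reconstructed class as written): with 12 encoder layers and 3 sparse encoder layers, one expert (MoE) block is placed every 4 layers.
config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
assert config.encoder_sparse_step == 4  # one sparse MLP block every 4 encoder layers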
| 49 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        TFLayoutLMv3ForQuestionAnswering,
        TFLayoutLMv3ForSequenceClassification,
        TFLayoutLMv3ForTokenClassification,
        TFLayoutLMv3Model,
    )
)
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class TFLayoutLMv3ModelTester:
def __init__( self , __a , __a=2 , __a=3 , __a=4 , __a=2 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=36 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=6 , __a=6 , __a=3 , __a=4 , __a=None , __a=1000 , ):
'''simple docstring'''
__a : Optional[Any] = parent
__a : int = batch_size
__a : Any = num_channels
__a : Optional[int] = image_size
__a : Dict = patch_size
__a : int = is_training
__a : Union[str, Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : Dict = use_labels
__a : str = vocab_size
__a : List[Any] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : str = num_attention_heads
__a : Union[str, Any] = intermediate_size
__a : Any = hidden_act
__a : List[str] = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : List[Any] = max_position_embeddings
__a : Tuple = type_vocab_size
__a : Any = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Any = coordinate_size
__a : List[Any] = shape_size
__a : Optional[int] = num_labels
__a : Dict = num_choices
__a : Union[str, Any] = scope
__a : Union[str, Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__a : Optional[int] = text_seq_length
__a : Any = (image_size // patch_size) ** 2 + 1
__a : Dict = self.text_seq_length + self.image_seq_length
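        # Worked example (illustrative, not from the original test): with the
        # defaults above, image_size=4 and patch_size=2 give (4 // 2) ** 2 + 1 = 5
        # image tokens, so seq_length = 7 + 5 = 12.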
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal (top-left coordinate before bottom-right)
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        model = TFLayoutLMv3Model(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForSequenceClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMv3ForTokenClassification(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        config.num_labels = 2
        model = TFLayoutLMv3ForQuestionAnswering(config=config)

        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
return config, inputs_dict
@require_tf
class TFLayoutLMv3ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMv3Model,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMv3ForQuestionAnswering, "feature-extraction": TFLayoutLMv3Model}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = TFLayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")

                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)

                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]

                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]

                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)
    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )
    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )

        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 27 | 0 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """
    Exponential Linear Unit (ELU): returns x for x > 0 and alpha * (exp(x) - 1)
    for x <= 0, which keeps the activation smooth at zero.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
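    # Illustrative check (not in the original file): positives pass through
    # unchanged, negatives are squashed smoothly toward -alpha.
    print(exponential_linear_unit(np.array([-2.0, 0.0, 2.0]), alpha=1.0))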
| 353 | """simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
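# Typical invocations (shell, illustrative):
#   accelerate config              # write a launch config interactively
#   accelerate launch train.py     # launch a script with the saved config
#   accelerate env                 # print environment info for bug reports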
| 30 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
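    # Illustrative (not part of the original class): with the default
    # conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the ratio is
    # 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples
    # (20 ms of 16 kHz audio).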
| 200 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_registration(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)

        outputs = model(**inputs)

        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 200 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Import and return the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file that have a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` of some test class in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the (model) tester class used by a test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in a model test file that test a given model class."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all (model) tester classes used to test a given model class."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from test classes to their (model) tester classes."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to the test classes that test them."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to their (model) tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make a JSON-serializable version of `o`, mapping classes to their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
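# Hypothetical usage sketch (the path below is illustrative, not from this file):
#   test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_model_to_tester_mapping(test_file)))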
| 289 |
"""simple docstring"""
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
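# Illustrative (not part of the original file): wrap a zero-argument forward
# function so it is traced as a compiled tf.function instead of running eagerly:
#   graph_forward = run_with_tf_optimizations(do_eager_mode=False, use_xla=False)(forward)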
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__
    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")

        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward

        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train

        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
| 289 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
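# Illustrative note (not part of the original file): GEGLU projects to twice the
# output width and gates one half with GELU of the other, e.g.
#   geglu = GEGLU(dim_in=4, dim_out=8)
#   y = geglu(torch.randn(1, 4))   # proj output is (1, 16); y has shape (1, 8)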
class AdaLayerNorm(nn.Module):
    """Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """Norm layer adaptive layer norm zero (adaLN-Zero)."""

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : Optional[str] = None , __lowerCamelCase : float = 1E-5 ):
super().__init__()
UpperCamelCase :Optional[int] = num_groups
UpperCamelCase :List[str] = eps
if act_fn is None:
UpperCamelCase :List[Any] = None
else:
UpperCamelCase :Tuple = get_activation(snake_case_ )
UpperCamelCase :int = nn.Linear(snake_case_ , out_dim * 2 )
def _A ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ):
if self.act:
UpperCamelCase :List[str] = self.act(snake_case_ )
UpperCamelCase :Any = self.linear(snake_case_ )
UpperCamelCase :Tuple = emb[:, :, None, None]
UpperCamelCase :Optional[int] = emb.chunk(2 , dim=1 )
UpperCamelCase :Dict = F.group_norm(snake_case_ , self.num_groups , eps=self.eps )
UpperCamelCase :List[Any] = x * (1 + scale) + shift
return x
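# Minimal usage sketch for the gated activation above (tensor shapes are
# illustrative assumptions, not taken from the original file):
#   geglu = GEGLU(dim_in=64, dim_out=128)
#   x = torch.randn(2, 10, 64)   # (batch, seq_len, dim_in)
#   out = geglu(x)               # (2, 10, 128); one half of the projection gates the other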
| 38 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
    calculate_bleu,
    calculate_rouge,
    chunks,
    lmap,
    load_json,
    parse_numeric_n_bool_cl_kwargs,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = getLogger(__name__)


def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run generation on this rank's shard of the data and save predictions to a rank-specific json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas


def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics: Dict = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)


def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list of predictions, sorted by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds


def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable


if __name__ == "__main__":
    # Usage for MT:
    run_generate()
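# Example multi-GPU invocation (model name, data dir, and GPU count are illustrative):
#   python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \
#       --model_name sshleifer/distilbart-xsum-12-3 \
#       --data_dir xsum --save_dir xsum_generations --bs 16 --fp16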
| 216 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
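# Illustrative effect of the lazy module (hypothetical interactive session):
#   >>> from transformers.models.megatron_bert import MegatronBertConfig
#   >>> MegatronBertConfig().model_type   # torch-heavy submodules load only on first access
#   'megatron-bert'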
| 342 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self: Tuple , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any]=13 , __lowerCamelCase: Optional[int]=32 , __lowerCamelCase: List[str]=3 , __lowerCamelCase: Dict=4 , __lowerCamelCase: Optional[Any]=[10, 20, 30, 40] , __lowerCamelCase: int=[2, 2, 3, 2] , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Union[str, Any]=True , __lowerCamelCase: Tuple=37 , __lowerCamelCase: Tuple="gelu" , __lowerCamelCase: List[Any]=10 , __lowerCamelCase: Optional[int]=0.02 , __lowerCamelCase: Optional[Any]=["stage2", "stage3", "stage4"] , __lowerCamelCase: Optional[int]=[2, 3, 4] , __lowerCamelCase: int=None , ) -> List[str]:
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : Optional[int] = image_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Union[str, Any] = num_stages
__UpperCAmelCase : List[str] = hidden_sizes
__UpperCAmelCase : Any = depths
__UpperCAmelCase : Optional[int] = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Union[str, Any] = num_labels
__UpperCAmelCase : Any = initializer_range
__UpperCAmelCase : List[str] = out_features
__UpperCAmelCase : Tuple = out_indices
__UpperCAmelCase : List[Any] = scope
def _lowerCamelCase ( self: List[Any] ) -> Optional[int]:
__UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : List[str] = None
if self.use_labels:
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self: Tuple ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: Optional[int] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[Any] = ConvNextVaModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowerCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Any , __lowerCamelCase: Tuple ) -> Tuple:
__UpperCAmelCase : Union[str, Any] = ConvNextVaForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self: int , __lowerCamelCase: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : List[str] = ConvNextVaBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
__UpperCAmelCase : Any = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowerCamelCase ( self: int ) -> List[str]:
__UpperCAmelCase : int = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : str = {"pixel_values": pixel_values}
return config, inputs_dict
def _lowerCamelCase ( self: List[Any] ) -> List[Any]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Dict = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def _lowerCamelCase ( self: Dict ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self: List[Any] ) -> int:
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _lowerCamelCase ( self: Any ) -> Any:
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _lowerCamelCase ( self: str ) -> Optional[Any]:
pass
def _lowerCamelCase ( self: List[Any] ) -> int:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : Optional[Any] = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
continue
__UpperCAmelCase : Optional[Any] = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
__UpperCAmelCase : Any = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
__UpperCAmelCase : List[str] = False
__UpperCAmelCase : int = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
continue
__UpperCAmelCase : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.gradient_checkpointing_enable()
model.train()
__UpperCAmelCase : List[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
__UpperCAmelCase : Any = model(**__lowerCamelCase ).loss
loss.backward()
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = model_class(__lowerCamelCase )
__UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def _lowerCamelCase ( self: str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def _lowerCamelCase ( self: Union[str, Any] ) -> Dict:
def check_hidden_states_output(__lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: str ):
__UpperCAmelCase : Any = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
__UpperCAmelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Any = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase ( self: Optional[Any] ) -> Optional[int]:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def _lowerCamelCase ( self: Dict ) -> List[Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def _UpperCamelCase ( ) -> List[Any]:
__UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
@cached_property
def _lowerCamelCase ( self: Optional[int] ) -> Dict:
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self: List[Any] ) -> Tuple:
__UpperCAmelCase : List[Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__lowerCamelCase )
__UpperCAmelCase : List[str] = self.default_image_processor
__UpperCAmelCase : Optional[Any] = prepare_img()
__UpperCAmelCase : int = preprocessor(images=__lowerCamelCase , return_tensors="pt" ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
__UpperCAmelCase : str = model(**__lowerCamelCase )
# verify the logits
__UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
__UpperCAmelCase : str = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
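# To run only this module's tests (path is illustrative; adjust to your checkout):
#   python -m pytest tests/models/convnextv2/test_modeling_convnextv2.py -x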
| 342 | 1 |
"""simple docstring"""
from math import sqrt
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
for i in range(1, int(sqrt(__snake_case ) + 1 ) ):
if n % i == 0 and i != sqrt(__snake_case ):
total += i + n // i
elif i == sqrt(__snake_case ):
total += i
return total - n
def lowerCamelCase__ ( __snake_case = 1_00_00 ) -> int:
"""simple docstring"""
_UpperCamelCase = sum(
i
for i in range(1, __snake_case )
if sum_of_divisors(sum_of_divisors(__snake_case ) ) == i and sum_of_divisors(__snake_case ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
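# Worked check: (220, 284) is the smallest amicable pair, so both appear below 300:
#   >>> sum_of_divisors(220)
#   284
#   >>> sum_of_divisors(284)
#   220
#   >>> solution(300)
#   504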
| 194 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")

    plt.legend()
    plt.show()
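# Optional sanity check (illustrative, not part of the original script): classify
# with a 0.5 threshold and report training accuracy.
#   preds = (predict_prob(x) >= 0.5).astype(int)
#   print("train accuracy:", (preds == y).mean())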
| 194 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    """Configuration class for a Time Series Transformer model."""

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
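# Quick instantiation sketch (values are illustrative):
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size   # == input_size * len(lags_sequence) + config._number_of_features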
| 370 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
__lowerCamelCase = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
__lowerCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
__lowerCamelCase = extract_warnings(args.output_dir, args.targets)
__lowerCamelCase = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
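# Example invocation (run id, token, and output dir are placeholders):
#   python extract_warnings.py --workflow_run_id 12345 --output_dir warnings_out \
#       --token $GH_TOKEN --targets DeprecationWarning,UserWarning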
| 59 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Class to contain the entire pipeline for the SHA-256 hashing algorithm."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]
# Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                S1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + S1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                S0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (S0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
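# Quick usage sketch (standard SHA-256 test vector):
#   >>> SHA256(b"abc").hash
#   'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'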
class SHA256HashTest(unittest.TestCase):
    """Test class for the SHA256 class. Inherits TestCase from unittest."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """
    Provides the options 'string' or 'file' to take input
    and prints the calculated SHA-256 hash.
    """
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 74 | 0 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is :")
print(z)
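# Rough expectation (illustrative): randomized quicksort performs about
# 2 * n * ln(n) comparisons on average, so for n = 100 expect on the order of
# 2 * 100 * ln(100) ~= 921, varying run to run with the random pivots.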
| 290 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
_UpperCAmelCase = {'unk_token': '<unk>'}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(A))
def _lowerCamelCase ( self : Optional[Any] , **A : str) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Any , **A : Dict) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Optional[int] , A : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
_UpperCAmelCase = tokenizer.tokenize(A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , A)
@require_ftfy
def _lowerCamelCase ( self : List[Any]) -> Dict:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_UpperCAmelCase = 'xa\u0303y' + ' ' + 'x\xe3y'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of space type
_UpperCAmelCase = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark):w
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of line break type
_UpperCAmelCase = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCAmelCase = F"{text_of_1_token} {text_of_1_token}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A) + 1, len(A) + 1 + len(A)) , )
_UpperCAmelCase = F" {text}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A) + 1, 1 + len(A) + 1 + len(A)) , )
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
with self.assertRaises(A) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
pass
| 290 | 1 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowerCamelCase_ = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def __lowerCamelCase ( a_ : Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE :Union[str, Any] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=a_ )[0]
@deprecated(a_ , '''Please use tf.data to implement this functionality.''' )
def __lowerCamelCase ( a_ : Optional[Any] ) -> int:
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=a_ ) as bytestream:
__SCREAMING_SNAKE_CASE :Optional[int] = _readaa(a_ )
if magic != 20_51:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__SCREAMING_SNAKE_CASE :Union[str, Any] = _readaa(a_ )
__SCREAMING_SNAKE_CASE :List[Any] = _readaa(a_ )
__SCREAMING_SNAKE_CASE :List[str] = _readaa(a_ )
__SCREAMING_SNAKE_CASE :Any = bytestream.read(rows * cols * num_images )
__SCREAMING_SNAKE_CASE :List[str] = numpy.frombuffer(a_ , dtype=numpy.uinta )
__SCREAMING_SNAKE_CASE :List[str] = data.reshape(a_ , a_ , a_ , 1 )
return data
@deprecated(a_ , '''Please use tf.one_hot on tensors.''' )
def __lowerCamelCase ( a_ : Any , a_ : Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE :int = labels_dense.shape[0]
__SCREAMING_SNAKE_CASE :Union[str, Any] = numpy.arange(a_ ) * num_classes
__SCREAMING_SNAKE_CASE :Any = numpy.zeros((num_labels, num_classes) )
__SCREAMING_SNAKE_CASE :Optional[int] = 1
return labels_one_hot
@deprecated(a_ , '''Please use tf.data to implement this functionality.''' )
def __lowerCamelCase ( a_ : Tuple , a_ : Optional[int]=False , a_ : List[str]=10 ) -> List[Any]:
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=a_ ) as bytestream:
__SCREAMING_SNAKE_CASE :str = _readaa(a_ )
if magic != 20_49:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__SCREAMING_SNAKE_CASE :Any = _readaa(a_ )
__SCREAMING_SNAKE_CASE :int = bytestream.read(a_ )
__SCREAMING_SNAKE_CASE :str = numpy.frombuffer(a_ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(a_ , a_ )
return labels
class _SCREAMING_SNAKE_CASE:
@deprecated(
SCREAMING_SNAKE_CASE__ ,'''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' ,)
def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=dtypes.floataa ,SCREAMING_SNAKE_CASE__=True ,SCREAMING_SNAKE_CASE__=None ,) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE :Any = random_seed.get_seed(SCREAMING_SNAKE_CASE__ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__SCREAMING_SNAKE_CASE :int = dtypes.as_dtype(SCREAMING_SNAKE_CASE__ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
__SCREAMING_SNAKE_CASE :int = 1_00_00
__SCREAMING_SNAKE_CASE :Optional[int] = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
__SCREAMING_SNAKE_CASE :int = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__SCREAMING_SNAKE_CASE :List[str] = images.reshape(
images.shape[0] ,images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__SCREAMING_SNAKE_CASE :Optional[int] = images.astype(numpy.floataa )
__SCREAMING_SNAKE_CASE :Optional[int] = numpy.multiply(SCREAMING_SNAKE_CASE__ ,1.0 / 2_5_5.0 )
__SCREAMING_SNAKE_CASE :Any = images
__SCREAMING_SNAKE_CASE :Optional[int] = labels
__SCREAMING_SNAKE_CASE :Any = 0
__SCREAMING_SNAKE_CASE :Any = 0
@property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
return self._images
@property
def _UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return self._labels
@property
def _UpperCamelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
return self._num_examples
@property
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
return self._epochs_completed
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__=True ) -> List[str]:
"""simple docstring"""
if fake_data:
__SCREAMING_SNAKE_CASE :List[Any] = [1] * 7_84
__SCREAMING_SNAKE_CASE :Optional[int] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(SCREAMING_SNAKE_CASE__ )],
[fake_label for _ in range(SCREAMING_SNAKE_CASE__ )],
)
__SCREAMING_SNAKE_CASE :Any = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__SCREAMING_SNAKE_CASE :List[str] = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = self.images[perma]
__SCREAMING_SNAKE_CASE :Optional[int] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__SCREAMING_SNAKE_CASE :List[Any] = self._num_examples - start
__SCREAMING_SNAKE_CASE :Optional[int] = self._images[start : self._num_examples]
__SCREAMING_SNAKE_CASE :Optional[Any] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__SCREAMING_SNAKE_CASE :Optional[int] = numpy.arange(self._num_examples )
numpy.random.shuffle(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = self.images[perm]
__SCREAMING_SNAKE_CASE :List[str] = self.labels[perm]
# Start next epoch
__SCREAMING_SNAKE_CASE :str = 0
__SCREAMING_SNAKE_CASE :Optional[Any] = batch_size - rest_num_examples
__SCREAMING_SNAKE_CASE :Dict = self._index_in_epoch
__SCREAMING_SNAKE_CASE :Optional[int] = self._images[start:end]
__SCREAMING_SNAKE_CASE :int = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ),
)
else:
self._index_in_epoch += batch_size
__SCREAMING_SNAKE_CASE :Optional[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(a_ , '''Please write your own downloading logic.''' )
def __lowerCamelCase ( a_ : int , a_ : Dict , a_ : Any ) -> Tuple:
if not gfile.Exists(a_ ):
gfile.MakeDirs(a_ )
__SCREAMING_SNAKE_CASE :int = os.path.join(a_ , a_ )
if not gfile.Exists(a_ ):
urllib.request.urlretrieve(a_ , a_ ) # noqa: S310
with gfile.GFile(a_ ) as f:
__SCREAMING_SNAKE_CASE :Dict = f.size()
print('''Successfully downloaded''' , a_ , a_ , '''bytes.''' )
return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test) | 191 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet"
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond"
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
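

# The slow suite below loads the real "diffusers/consistency_models" checkpoint, so it
# only runs in the slow test pass (@slow) and requires a CUDA device (@require_torch_gpu).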
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)
        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }
        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents
        return inputs
    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        if isinstance(device, str):
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 | 191 | 1
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    "kwargs, expected",
    [
        ({"num_shards": 0, "max_num_jobs": 1}, []),
        ({"num_shards": 10, "max_num_jobs": 1}, [range(10)]),
        ({"num_shards": 10, "max_num_jobs": 10}, [range(i, i + 1) for i in range(10)]),
        ({"num_shards": 1, "max_num_jobs": 10}, [range(1)]),
        ({"num_shards": 10, "max_num_jobs": 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({"num_shards": 3, "max_num_jobs": 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, max_num_jobs, expected",
    [
        ({"foo": 0}, 10, [{"foo": 0}]),
        ({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
        ({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
        ({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
        ({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected


@pytest.mark.parametrize(
    "gen_kwargs, expected",
    [
        ({"foo": 0}, 1),
        ({"shards": [0]}, 1),
        ({"shards": [0, 1, 2, 3]}, 4),
        ({"shards": [0, 1, 2, 3], "foo": 0}, 4),
        ({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
        ({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
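

# Quick illustration of the sharding helpers exercised above (mirrors the parametrized
# cases; not part of the original test module):
#
#   _distribute_shards(num_shards=10, max_num_jobs=3)
#   # -> [range(0, 4), range(4, 7), range(7, 10)]
#   _split_gen_kwargs({"shards": [0, 1, 2, 3]}, max_num_jobs=2)
#   # -> [{"shards": [0, 1]}, {"shards": [2, 3]}]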
| 318 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
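

# Illustrative usage, mirroring the docstring examples above (not part of the original
# metric module):
#
#   pearsonr_metric = datasets.load_metric("pearsonr")
#   results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
#   round(results["pearsonr"], 2)  # -> -0.74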
| 318 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
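

# Migration sketch (assumed, not part of the original file): new code should construct
# the replacement class directly to avoid the FutureWarning emitted above, e.g.
#   image_processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")  # checkpoint name illustrative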
| 1 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for different number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark multiple functions, with three different length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
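
# Example output (illustrative, not part of the original file):
#
#   print_pascal_triangle(3) prints
#       1
#      1 1
#     1 2 1
#   and generate_pascal_triangle_optimized(5)[-1] == [1, 4, 6, 4, 1]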
| 292 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1, classifier_dropout=None,
                 **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
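

# Minimal usage sketch (assumed, not part of the original file): the defaults above
# reproduce the YituTech/conv-bert-base architecture.
#
#   configuration = ConvBertConfig()
#   configuration.conv_kernel_size  # -> 9, the span of the dynamic convolution kernel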
| 143 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_a: List, token_ids_a_a: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get a list where entries are [1] if a token is a special token, [0] otherwise."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_a_a is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a_a) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """Build model inputs from a sequence by appending eos_token_id."""
        if token_ids_a_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a_a + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
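

# Illustrative usage (checkpoint name taken from the pretrained maps above; not part
# of the original module):
#
#   tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("PEGASUS is great.").input_ids  # build_inputs_with_special_tokens appends eos_token_id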
| 143 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk down the attribute path ("a.b.c") to the module/parameter to fill.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
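

# Example of the matching rules above (illustrative, not part of the original script):
# should_ignore("speech_encoder_prenet.post_extract_proj.weight", IGNORE_KEYS_T2S) is True
# because of the "speech_encoder_prenet.*" entry, while a ".*." pattern such as
# "encoder.layers.*.norm_k.weight" only matches names containing both its prefix and suffix.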
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue

        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
) | 269 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144,
                 num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5,
                 layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3,
                 attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1,
                 conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,),
                 input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum",
                 ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            ) | 269 | 1
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem: place, recurse, and backtrack."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
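
# Illustrative behaviour (assumed from the implementation above, not part of the file):
#
#   board = open_knight_tour(5)  # board[i][j] holds the step at which square (i, j) is visited
#   open_knight_tour(2)          # raises ValueError: no open tour exists on a 2x2 board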
| 366 |
"""simple docstring"""
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    # Depth limit after which quicksort falls back to heapsort (introsort's key idea).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
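

# Illustrative usage of the public entry point (doctest style; not part of the
# original file):
#
#   >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#   [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]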
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 326 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str:
'''simple docstring'''
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ = f.read()
lowercase_ , lowercase_ = REPLACE_PATTERNS[pattern]
lowercase_ = replace.replace("""VERSION""" , __lowerCAmelCase )
lowercase_ = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str:
'''simple docstring'''
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> Optional[Any]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
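# Illustrative behaviour of a pattern rewrite (not part of the original
# script): applying the "init" pattern to a version line.
# >>> re_pattern, replace = REPLACE_PATTERNS["init"]
# >>> re_pattern.sub(replace.replace("VERSION", "4.18.0"), '__version__ = "4.17.0"\n')
# '__version__ = "4.18.0"\n'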
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 136 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forwards the `audio` argument to the feature extractor and the `text` argument to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to Speech2TextTokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to Speech2TextTokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer for processing the input, e.g. when encoding labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
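# Illustrative usage (the checkpoint id and inputs are examples, not part of
# this module):
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
# labels = processor(text="a transcription", return_tensors="pt").input_ids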
| 136 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Builds model inputs by adding the CamemBERT special tokens around one or two sequences."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Returns a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """CamemBERT, like RoBERTa, does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
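# Illustrative usage (the checkpoint id is an example): load the pretrained
# vocabulary and tokenize a French sentence.
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# tokenizer.tokenize("J'aime le camembert !")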
| 361 |
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
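# Optional, illustrative: for reproducible samples, pass a seeded generator
# (supported by the diffusers pipeline call).
# generator = torch.Generator(device="cuda").manual_seed(0)
# image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]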
| 288 | 0 |
"""simple docstring"""
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = name
lowerCamelCase = value
lowerCamelCase = weight
def __repr__( self ):
"""simple docstring"""
return f'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.value
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.name
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.weight
def _lowerCAmelCase ( self ):
"""simple docstring"""
return self.value / self.weight
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCamelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def a__ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCamelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCamelCase = []
lowerCamelCase , lowerCamelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def a__ ( ) -> str:
pass
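# Illustrative example of the greedy strategy above (names and numbers are
# made up for this sketch): with a weight budget of 20, only the densest
# affordable item fits.
# >>> menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 10, 20])
# >>> greedy(menu, 20, Things.get_value)
# ([Things(Pizza, 100, 10)], 100.0)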
if __name__ == "__main__":
import doctest
doctest.testmod()
| 291 |
"""simple docstring"""
import operator as op
# Constant names below are restored to match the upstream
# `accelerate.utils.constants` module.
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
# NOTE: the original identifier for the next list is not recoverable from this
# excerpt; the name below is descriptive only.
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 291 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
snake_case_ = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
snake_case_ = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
snake_case_ = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 362 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Implements the sine function via its Taylor (Maclaurin) series expansion."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
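# Quick sanity check (illustrative, not part of the original module): the
# series reproduces exact values such as sin(30 degrees) == 0.5.
# >>> abs(sin(30.0) - 0.5) < 1e-9
# True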
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 181 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
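# Illustrative usage (the model name and sizes are examples): build the
# argument object and inspect the resolved device.
# args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.device, args.n_gpu)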
| 98 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
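# Illustrative usage of the auto classes defined above (the checkpoint id is
# an example):
# model = FlaxAutoModel.from_pretrained("bert-base-cased")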
| 32 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
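# Illustrative invocation (the test-file path is an assumption about where
# this module lives in the repository):
#   python -m pytest tests/models/regnet/test_modeling_flax_regnet.py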
| 146 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
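# Illustrative consumer of the fixtures above (assumed, not part of the
# original conftest): a test that receives the generated script directory.
# def test_dummy_dataset_script_dir(dataset_loading_script_dir):
#     assert dataset_loading_script_dir.endswith(DATASET_LOADING_SCRIPT_NAME)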
| 146 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        prefix = "vilt."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + f"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
@torch.no_grad()
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
a :Optional[int] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCAmelCase_ )
a :Union[str, Any] = False
a :str = False
a :Union[str, Any] = False
a :str = False
if "vqa" in checkpoint_url:
a :List[str] = True
a :str = 3129
a :Optional[int] = '''huggingface/label-files'''
a :Any = '''vqa2-id2label.json'''
a :Optional[int] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
a :Any = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
a :Optional[Any] = idalabel
a :List[Any] = {v: k for k, v in idalabel.items()}
a :Tuple = ViltForQuestionAnswering(UpperCAmelCase_ )
elif "nlvr" in checkpoint_url:
a :Optional[int] = True
a :List[str] = 2
a :Union[str, Any] = {0: '''False''', 1: '''True'''}
a :List[Any] = {v: k for k, v in config.idalabel.items()}
a :List[str] = 3
a :Any = ViltForImagesAndTextClassification(UpperCAmelCase_ )
elif "irtr" in checkpoint_url:
a :Optional[int] = True
a :List[Any] = ViltForImageAndTextRetrieval(UpperCAmelCase_ )
elif "mlm_itm" in checkpoint_url:
a :Tuple = True
a :Optional[int] = ViltForMaskedLM(UpperCAmelCase_ )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
a :Dict = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='''cpu''' )['''state_dict''']
a :Dict = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ )
if mlm_model or irtr_model:
a :str = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
a , a :List[Any] = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCAmelCase_ )
# Define processor
a :Union[str, Any] = ViltImageProcessor(size=384 )
a :List[str] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
a :List[str] = ViltProcessor(UpperCAmelCase_ , UpperCAmelCase_ )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
        text = (
            "The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
            " standing."
        )
        encoding_1 = processor(image1, text, return_tensors="pt")
        encoding_2 = processor(image2, text, return_tensors="pt")
        outputs = model(
            input_ids=encoding_1.input_ids, pixel_values=encoding_1.pixel_values, pixel_values_2=encoding_2.pixel_values
        )
    else:
        image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
        if mlm_model:
            text = "a bunch of [MASK] laying on a [MASK]."
        else:
            text = "How many cats are there?"
        encoding = processor(image, text, return_tensors="pt")
        outputs = model(**encoding)
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 30522])
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)

        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
        assert tokenizer.decode([predicted_id]) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 3129])
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        # VQA logits are 2-D, so only the row slice is checked here (the original
        # lines also indexed logits[0, 0, :3], which cannot work on a 2-D tensor)
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2])
        expected_slice = torch.tensor([-2.8721, 2.1291])
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
        assert outputs.logits.shape == expected_shape

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model and processor to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
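# Example invocation (the script filename is a hypothetical placeholder; the
# checkpoint URL is the default defined above):
#   python convert_vilt_checkpoint.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm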
| 94 |
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    """CUAD metric, wrapping the official CUAD scoring script."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 68 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
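    # Minimal usage sketch (the checkpoint name is illustrative, assumes Hub access):
    #   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    #   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")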
    def batch_decode(self, *args, **kwargs):
        """Forward everything to BertTokenizerFast.batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to BertTokenizerFast.decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 182 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0  # zero out a region to in-paint (the exact region in the original test may differ)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
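# The integration test below exercises the two-stage Kandinsky 2.2 flow: the
# prior pipeline maps the text prompt to image embeddings, and the decoder
# (inpaint) pipeline denoises the masked image conditioned on those embeddings.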
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # region to in-paint (bounds follow the original test)

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image) | 182 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    state_dict = state_dict_for_hugging_face

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
snake_case_ = MLukeTokenizer.from_pretrained(UpperCamelCase__ , task='entity_classification' )
snake_case_ = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
snake_case_ = (0, 9)
snake_case_ = tokenizer(UpperCamelCase__ , entity_spans=[span] , return_tensors='pt' )
snake_case_ = model(**UpperCamelCase__ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case_ = torch.Size((1, 33, 768) )
snake_case_ = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
snake_case_ = torch.Size((1, 1, 768) )
snake_case_ = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
F''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
snake_case_ = MLukeTokenizer.from_pretrained(UpperCamelCase__ )
snake_case_ = 'Tokyo is the capital of <mask>.'
snake_case_ = (24, 30)
snake_case_ = tokenizer(UpperCamelCase__ , entity_spans=[span] , return_tensors='pt' )
snake_case_ = model(**UpperCamelCase__ )
snake_case_ = encoding['input_ids'][0].tolist()
snake_case_ = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
snake_case_ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(UpperCamelCase__ )
snake_case_ = outputs.entity_logits[0][0].argmax().item()
snake_case_ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(UpperCamelCase__ ) )
model.save_pretrained(UpperCamelCase__ )
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id

    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
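# Example invocation (all paths are hypothetical placeholders):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path ./mluke/pytorch_model.bin \
#       --metadata_path ./mluke/metadata.json \
#       --entity_vocab_path ./mluke/entity_vocab.jsonl \
#       --pytorch_dump_folder_path ./mluke-base \
#       --model_size base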
| 285 |
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number in a decreasingly sorted array."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
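# Worked example: in the decreasingly sorted row [4, 3, 2, -1], the first
# negative value sits at index 3, so find_negative_index([4, 3, 2, -1])
# returns 3 and the row contains len(row) - 3 = 1 negative number.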
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negative numbers using binary search on each row. Since each row
    is sorted in decreasing order, the bound found for one row also bounds the
    search in the next row."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count the negative numbers by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 285 | 1 |
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of a list as tuples."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
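# Example: heaps([1, 2, 3]) yields all 3! = 6 orderings, in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]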
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 359 |
"""simple docstring"""
import sys
import turtle
def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Find the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
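# Each recursion level replaces a triangle with three half-sized ones, so a
# drawing of depth d traces 3**d smallest triangles.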
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 166 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`DebertaV2Model`]."""

    model_type = "deberta-v2"
    def __init__(self, vocab_size=128100, hidden_size=1536, num_hidden_layers=24, num_attention_heads=24, intermediate_size=6144, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=0, initializer_range=0.02, layer_norm_eps=1e-7, relative_attention=False, max_relative_positions=-1, pad_token_id=0, position_biased_input=True, pos_att_type=None, pooler_dropout=0, pooler_hidden_act="gelu", **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, num_choices: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, num_channels: int = 3, image_width: int = 40, image_height: int = 40, tokenizer: "PreTrainedTokenizerBase" = None) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 139 |
"""simple docstring"""
def solution(limit: int = 1_00_00_00) -> int:
    """Count how many n < limit can be written as n = x**2 - y**2 - z**2 in
    exactly ten ways, with x, y, z positive integers in arithmetic progression."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # a + n/a must be divisible by 4 for d to be an integer
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # z > 0 requires a > d, and n > 0 requires a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"{solution() = }")
| 288 | 0 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """
    This will be superseded by a framework-agnostic approach soon.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
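    # Minimal usage sketch (argument values are illustrative):
    #   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
    #   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    #   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)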
| 76 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """A matrix multiplied with its inverse gives the identity matrix. This
    function finds the inverse of a 2x2 or 3x3 matrix; if the determinant is 0,
    the inverse does not exist."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
elif (
        len(matrix) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
        cofactor_matrix = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
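# Quick sanity check: [[2.0, 5.0], [1.0, 3.0]] has determinant 2 * 3 - 1 * 5 = 1,
# so inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]) returns [[3.0, -5.0], [-1.0, 2.0]].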
| 76 | 1 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`MvpModel`]."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
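    # Minimal usage sketch:
    #   config = MvpConfig()  # defaults matching the RUCAIBox/mvp checkpoint
    #   small_config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)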
| 116 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a [`CamembertModel`]."""

    model_type = "camembert"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 116 | 1 |
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """Two strings are anagrams if they are made up of the same letters,
    arranged differently (ignoring case and spaces)."""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in the input strings,
    # increment count in the corresponding entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
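# Example: check_anagrams("Silent", "Listen") is True, while
# check_anagrams("There", "Their") is False.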
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""") | 361 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    r"""Configuration class for a timm backbone [`TimmBackbone`]."""

    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,) | 196 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 79 |
def harmonic_series(n_term: str) -> list:
    """Return the harmonic series ["1", "1/2", "1/3", ..., "1/n"] up to the n-th term."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
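# Example: harmonic_series("4") returns ["1", "1/2", "1/3", "1/4"].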
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
| 26 | 0 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")
class GraphAdjacencyList(Generic[T]):
    """
    Adjacency-list graph implementation, supporting both directed and
    undirected graphs. The adjacency list is a plain dictionary mapping each
    vertex to the list of its adjacent vertices.
    """

    def __init__(self, directed: bool = True) -> None:
        """
        Parameters:
        directed: whether the graph is directed (default True)
        """
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """
        Connects source_vertex and destination_vertex, returning the graph to
        allow chained calls.
        """
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
A__ : str =[source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowerCAmelCase_ )
A__ : int =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
A__ : Dict =[destination_vertex]
A__ : Tuple =[source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowerCAmelCase_ )
A__ : Any =[]
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
A__ : Any =[destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
A__ : Optional[Any] =[destination_vertex]
A__ : Union[str, Any] =[]
return self
def __repr__( self : List[str] ) -> str:
'''simple docstring'''
return pformat(self.adj_list )
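# Illustrative usage sketch (added for clarity, not part of the original file):
#
#     graph = GraphAdjacencyList[int](directed=False)
#     graph.add_edge(1, 2).add_edge(2, 3)  # add_edge returns self, so calls chain
#     print(graph)                         # {1: [2], 2: [1, 3], 3: [2]}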
| 136 |
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(number: int) -> int:
    """Return the value of the Mobius function for `number`."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
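# Quick sanity checks (illustrative, not in the original file):
# mobius(6) == 1    (6 = 2 * 3 is square-free with an even number of prime factors)
# mobius(30) == -1  (30 = 2 * 3 * 5 is square-free with an odd number of prime factors)
# mobius(12) == 0   (12 = 2**2 * 3 is not square-free)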
| 136 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value) -> None:
        self.value = value
        self.left = None
        self.right = None


class Tree:
    def __init__(self, tree) -> None:
        self.tree = tree

    def depth_first_search(self, node) -> int:
        """Return the sum of all node values reachable from `node` via recursive DFS."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
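# Illustrative usage sketch (added for clarity, not part of the original file):
#
#     root = Node(1)
#     root.left = Node(2)
#     root.right = Node(3)
#     assert next(iter(Tree(root))) == 6  # 1 + 2 + 3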
| 340 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`, recording even-sized subtrees in `cuts`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Run the DFS from the root to populate `cuts`."""
    dfs(1)
if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
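# Note (added for clarity, not part of the original file): dfs() returns the
# size of the subtree rooted at `start`, and every even-sized subtree can be
# disconnected from its parent. dfs(1) also records the root (the whole tree
# has even size n=10), so `len(cuts) - 1` counts the removable edges; for the
# sample edges above the script prints 2.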
| 340 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for processing logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsWarper:
    """Abstract base class for all logit warpers applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        """Flax method for warping logits."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class FlaxLogitsProcessorList(list):
    """
    Creates a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] objects that are applied in turn to process a
    `scores` input tensor.
    """

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f"Make sure that all the required parameters: {list(function_args.keys())} for "
                        f"{processor.__class__} are passed to the logits processor."
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores


class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] for temperature (exponential scaling of the output probability distribution)."""

    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that restricts sampling to the smallest set of tokens whose probabilities sum to `top_p`."""

    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that keeps only the `top_k` highest-probability tokens."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the first generated token."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that enforces the specified token as the last one when `max_length` is reached."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] enforcing a minimum length by setting the EOS probability to 0."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)

        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens as soon as generation starts, at `begin_index`."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)

        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] suppressing a list of tokens at every decoding step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that forces specific tokens at specific generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If the current length is geq than the length of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (positive) tokens are forced
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    """[`FlaxLogitsProcessor`] that modifies the logits for the generation of timestamps in Whisper transcriptions."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
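# Illustrative usage sketch (added for clarity, not part of the original file):
# chain a processor and a warper the same way generate() does internally.
#
#     processors = FlaxLogitsProcessorList(
#         [FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=1), FlaxTemperatureLogitsWarper(0.7)]
#     )
#     scores = processors(input_ids, scores, cur_len=input_ids.shape[-1])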
| 368 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image processor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width)
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
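# Note (added for clarity, not part of the original tests): outside the test
# suite the processor is typically used as a one-liner, e.g.
#
#     processor = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
#     inputs = processor(images=image, return_tensors="pt")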
| 185 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extends `TrainingArguments` with sequence-to-sequence specific options."""

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 209 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
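# Illustrative usage sketch (added for clarity, not part of the original file):
#
#     tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
#     tokenizer("Hello world")["input_ids"]  # ids for "[CLS] hello world [SEP]"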
| 178 | 0 |
'''simple docstring'''
from timeit import timeit
__a: str = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'{key:21} {value}')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 214 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DPT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
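# Note (added for clarity, not part of the original tests): the integration test
# above is gated behind @slow and only runs when the RUN_SLOW=1 environment
# variable is set, e.g. (the path here is an assumption about the repo layout):
#
#     RUN_SLOW=1 pytest tests/models/dpt/test_modeling_dpt.py -k inference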
| 214 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    """Configuration class to store the configuration of a [`ViTModel`]."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
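# Illustrative usage sketch (added for clarity, not part of the original file):
#
#     config = ViTConfig(image_size=384)  # override any default above
#     onnx_config = ViTOnnxConfig(config)
#     list(onnx_config.inputs)            # ['pixel_values']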
| 318 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_pegasus_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer(self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
    def get_tokenizer(self , **kwargs ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts(self , tokenizer ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
def UpperCAmelCase__ (self ):
        src_texts = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors='''pt''' )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4_0_9_6)
        assert batch.attention_mask.shape == (2, 4_0_9_6)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
        raw_input_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            ids , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
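    # Hedged usage sketch (illustrative; mirrors the integration test above and
    # assumes network access to the same checkpoint):
    #   from transformers import PegasusTokenizer
    #   tok = PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    #   batch = tok(["short", "long " * 2_0_0_0], padding=True, truncation=True, return_tensors="pt")
    #   assert batch.input_ids.shape[-1] <= tok.model_max_length  # 4096 for this checkpoint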
| 318 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
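        # Worked example using the defaults above (illustrative arithmetic only):
        # image_size=30, patch_size=2 -> (30 // 2) ** 2 = 225 patches, so the
        # expected sequence length is 225 + 1 = 226 tokens including [CLS].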
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ViTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = ViTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = ViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_interpolate_pos_encoding( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(torch_device )
        image_processor = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values , interpolate_pos_encoding=True )
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
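    # A minimal sketch (not the library's exact code) of the interpolation the
    # comment above describes, assuming pos_embed is (1, 1 + N, dim) with a leading
    # [CLS] slot, N a perfect square, and new_side the hypothetical target grid width:
    #   cls_tok, patch_tok = pos_embed[:, :1], pos_embed[:, 1:]
    #   side = int(patch_tok.shape[1] ** 0.5)
    #   grid = patch_tok.reshape(1, side, side, -1).permute(0, 3, 1, 2)
    #   grid = nn.functional.interpolate(grid, size=(new_side, new_side), mode="bicubic")
    #   pos_embed = torch.cat([cls_tok, grid.permute(0, 2, 3, 1).reshape(1, -1, grid.shape[1])], dim=1)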
@slow
@require_accelerate
@require_torch_gpu
    def test_inference_fp16( self ):
        model = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 324 |
"""simple docstring"""
def depth_first_search( grid , row , col , visit ):
    '''simple docstring'''
    row_length , col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
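# Hedged usage sketch (illustrative grid): blocking the centre cell of a 3x3 grid
# leaves a ring of open cells, so exactly two simple paths connect the corners.
#   example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#   print(depth_first_search(example_grid, 0, 0, set()))  # -> 2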
| 324 | 1 |
"""simple docstring"""
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
__magic_name__ = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
__magic_name__ = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
def pt_to_pil(images ):
    # rescale from [-1, 1] to [0, 1], move channels last, and hand off to numpy_to_pil
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil(images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
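# Hedged usage sketch (assumes torch is available; it is not imported above because
# the converters only call methods on the object they are given):
#   import torch
#   pil_batch = pt_to_pil(torch.rand(2, 3, 64, 64) * 2 - 1)  # two 64x64 RGB PIL images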
| 100 |
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class( config_class ) -> List[Any]:
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
return checkpoint
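# Illustrative sketch (hypothetical config class): for a docstring containing
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)", the regex yields
# [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")], the
# name/link round-trip check passes, and the function returns "bert-base-uncased".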
def check_config_docstrings_have_checkpoints( ) -> Union[str, Any]:
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 69 | 0 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
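    # Shape sketch for the multiple-choice expansion above (illustrative numbers):
    # input_ids (batch=2, seq=8) -> unsqueeze(1) -> (2, 1, 8) -> expand to
    # (2, num_choices=4, 8); .contiguous() materialises the expanded view so the
    # model can flatten choices into the batch dimension safely.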
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='MRA does not output attentions' )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
        input_ids = torch.arange(4_096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 353 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = LevitImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = LevitImageProcessingTester(self )
@property
    def image_processor_dict( self ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , 'image_mean' ) )
        self.assertTrue(hasattr(image_processing , 'image_std' ) )
        self.assertTrue(hasattr(image_processing , 'do_normalize' ) )
        self.assertTrue(hasattr(image_processing , 'do_resize' ) )
        self.assertTrue(hasattr(image_processing , 'do_center_crop' ) )
        self.assertTrue(hasattr(image_processing , 'size' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 18} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
| 210 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["""XLA_PYTHON_CLIENT_ALLOCATOR"""] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids=None , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> Dict:
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
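# Illustrative sketch (hypothetical shapes): for input_ids of shape (13, 7) and
# pad_token_id=1, the helper returns the four arrays above, with
# attention_mask[i, j] == 0 exactly where input_ids[i, j] == 1.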
class FlaxBlenderbotSmallModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
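    # The check above exercises incremental decoding: decode all-but-the-last token
    # while filling the cache, decode the final token from the cache, and compare
    # the last-step logits against a single full (uncached) decode - the two paths
    # should agree to within ~1e-3.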
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict["input_ids"] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class FlaxBlenderbotSmallHeadTests(unittest.TestCase ):
    '''simple docstring'''
    vocab_size = 9_9
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self ):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape )
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
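    # Worked sketch for shift_tokens_right (illustrative row): with pad=1 and
    # decoder_start=2, [71, 82, 18, 33, 2, 1, 1] becomes [2, 71, 82, 18, 33, 2, 1]:
    # every token moves one slot right, the start token fills slot 0, and exactly
    # one trailing pad is consumed - which is what the n_pad_before - 1 check asserts.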
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    '''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest("JIT Enabled" ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M" )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 31 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin ):
    '''simple docstring'''
    def __init__( self , controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , encoder_hidden_states: torch.Tensor , controlnet_cond: List[torch.tensor] , conditioning_scale: List[float] , class_labels: Optional[torch.Tensor] = None , timestep_cond: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , cross_attention_kwargs: Optional[Dict[str, Any]] = None , guess_mode: bool = False , return_dict: bool = True , ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples, mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory: Union[str, os.PathLike] , is_main_process: bool = True , save_function: Callable = None , safe_serialization: bool = False , variant: Optional[str] = None , ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + F"""_{idx}"""
@classmethod
    def from_pretrained( cls , pretrained_model_path: Optional[Union[str, os.PathLike]] , **kwargs ):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + F"""_{idx}"""
        logger.info(F"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
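# Hedged usage sketch (paths are illustrative): saving a two-ControlNet wrapper
# writes "./my_controlnets" and "./my_controlnets_1"; `from_pretrained` then walks
# the same "_{idx}" suffixes back in order.
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("./my_controlnets")
#   restored = MultiControlNetModel.from_pretrained("./my_controlnets")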
| 31 | 1 |
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast( BertTokenizerFast ):
    slow_tokenizer_class = CustomTokenizer
    pass
| 369 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
'''
@dataclass
class ShapEPipelineOutput( BaseOutput ):
    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline( DiffusionPipeline ):
    def __init__( self , prior: PriorTransformer , image_encoder: CLIPVisionModel , image_processor: CLIPImageProcessor , scheduler: HeunDiscreteScheduler , renderer: ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=prior , image_encoder=image_encoder , image_processor=image_processor , scheduler=scheduler , renderer=renderer , )
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload ( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device(f'cuda:{gpu_id}' )
        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
@property
    def _execution_device ( self ):
        if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image ( self , image , device , num_images_per_prompt , do_classifier_free_guidance , ):
        if isinstance(image , list ) and isinstance(image[0] , torch.Tensor ):
            image = torch.cat(image , axis=0 ) if image[0].ndim == 4 else torch.stack(image , axis=0 )
        if not isinstance(image , torch.Tensor ):
            image = self.image_processor(image , return_tensors="pt" ).pixel_values[0].unsqueeze(0 )
        image = image.to(dtype=self.image_encoder.dtype , device=device )
        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image : Union[PIL.Image.Image, List[PIL.Image.Image]] , num_images_per_prompt : int = 1 , num_inference_steps : int = 2_5 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , guidance_scale : float = 4.0 , frame_size : int = 6_4 , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image , list ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image , device , num_images_per_prompt , do_classifier_free_guidance )
        # prior
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps = self.scheduler.timesteps
        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim
        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0] , num_embeddings , embedding_dim )
        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            noise_pred = self.prior(
                scaled_model_input , timestep=t , proj_embedding=image_embeds , ).predicted_image_embedding
            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            latents = self.scheduler.step(
                noise_pred , timestep=t , sample=latents , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )
        images = []
        for i, latent in enumerate(latents ):
            image_decoded = self.renderer.decode(
                latent[None, :] , device , size=frame_size , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
            images.append(image_decoded )
        images = torch.stack(images )
        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )
        images = images.cpu().numpy()
        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]
        # Offload last model to CPU
        if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=images )
| 156 | 0 |
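# Hedged sketch of the classifier-free-guidance update used inside the denoising loop
# above: the batch is doubled (unconditional + conditional), the model output is split,
# and the two halves are recombined with the guidance scale. Plain NumPy stand-in, so the
# array names are illustrative assumptions rather than the pipeline's real tensors.
import numpy as np

def apply_cfg(noise_pred_uncond: np.ndarray, noise_pred_cond: np.ndarray, guidance_scale: float) -> np.ndarray:
    # guidance_scale == 1.0 reduces to the purely conditional prediction
    return noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)

uncond = np.zeros((1, 4))
cond = np.ones((1, 4))
assert np.allclose(apply_cfg(uncond, cond, 3.0), 3.0 * np.ones((1, 4)))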
from ..utils import DummyObject, requires_backends
class snake_case__( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""speech"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
class snake_case__( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""speech"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
"""simple docstring"""
import os
def solution ( filename = "input.txt" ):
    """Return the minimal path sum from the left column to the right column of the grid."""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'{solution() = }')
| 167 | 0 |
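# Worked example for the left-to-right minimal path sum above (Project Euler 82 style),
# run on an in-memory matrix instead of a file so the recurrence can be checked by hand.
# This 1-D variant keeps only the best cost per row and applies the same three sweeps:
# move right, then allow downward moves, then allow upward moves within the column.
def minimal_path_sum(matrix: list) -> int:
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # cost of reaching each cell in column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]
        for i in range(1, rows):
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

# The 5x5 example grid from the Project Euler 82 statement; the documented answer is 994.
grid = [
    [131, 673, 234, 103, 18],
    [201, 96, 342, 965, 150],
    [630, 803, 746, 422, 111],
    [537, 699, 497, 121, 956],
    [805, 732, 524, 37, 331],
]
assert minimal_path_sum(grid) == 994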
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        """negative_prompt""",
        """height""",
        """width""",
        """negative_prompt_embeds""",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""source_prompt"""})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __A ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE : str = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , num_train_timesteps=1000 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
SCREAMING_SNAKE_CASE : Tuple = CLIPTextModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE : List[Any] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def __A ( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''prompt''': '''An astronaut riding an elephant''',
'''source_prompt''': '''An astronaut riding a horse''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''eta''': 0.1,
'''strength''': 0.8,
'''guidance_scale''': 3,
'''source_guidance_scale''': 1,
'''output_type''': '''numpy''',
}
return inputs
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components()
SCREAMING_SNAKE_CASE : Dict = CycleDiffusionPipeline(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = pipe(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : int = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Dict = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCamelCase__ , '''half''' ):
SCREAMING_SNAKE_CASE : str = module.half()
SCREAMING_SNAKE_CASE : Any = CycleDiffusionPipeline(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.get_dummy_inputs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pipe(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = output.images
SCREAMING_SNAKE_CASE : Any = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def __A ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_local()
@unittest.skip('''non-deterministic pipeline''' )
def __A ( self : List[str] ):
'''simple docstring'''
return super().test_inference_batch_single_identical()
@skip_mps
def __A ( self : List[Any] ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def __A ( self : Any ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests ( unittest.TestCase):
def __A ( self : Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy''' )
SCREAMING_SNAKE_CASE : Tuple = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : int = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE : List[str] = CycleDiffusionPipeline.from_pretrained(
            UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , torch_dtype=torch.float16 , revision='''fp16''' )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Union[str, Any] = '''A black colored car'''
SCREAMING_SNAKE_CASE : int = '''A blue colored car'''
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = pipe(
prompt=UpperCamelCase__ , source_prompt=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : List[str] = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/cycle-diffusion/black_colored_car.png''' )
SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy''' )
SCREAMING_SNAKE_CASE : Tuple = init_image.resize((512, 512) )
SCREAMING_SNAKE_CASE : Optional[int] = '''CompVis/stable-diffusion-v1-4'''
SCREAMING_SNAKE_CASE : Dict = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE : Optional[Any] = CycleDiffusionPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE : Union[str, Any] = '''A black colored car'''
SCREAMING_SNAKE_CASE : Tuple = '''A blue colored car'''
SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Dict = pipe(
prompt=UpperCamelCase__ , source_prompt=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCamelCase__ , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
assert np.abs(image - expected_image ).max() < 2E-2
| 258 | import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : Dict = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class EncodecConfig ( PretrainedConfig ):
    model_type = """encodec"""
    def __init__( self , target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate=2_4000 , audio_channels=1 , normalize=False , chunk_length_s=None , overlap=None , hidden_size=128 , num_filters=32 , num_residual_layers=1 , upsampling_ratios=[8, 5, 4, 2] , norm_type="weight_norm" , kernel_size=7 , last_kernel_size=7 , residual_kernel_size=3 , dilation_growth_rate=2 , use_causal_conv=True , pad_mode="reflect" , compress=2 , num_lstm_layers=2 , trim_right_ratio=1.0 , codebook_size=1024 , codebook_dim=None , use_conv_shortcut=True , **kwargs , ):
        '''simple docstring'''
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}""" )
        super().__init__(**kwargs )
    @property
    def chunk_length ( self ):
        '''simple docstring'''
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride ( self ):
        '''simple docstring'''
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate ( self ):
        '''simple docstring'''
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers ( self ):
        '''simple docstring'''
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 258 | 1 |
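# Quick numeric check of the derived quantities computed by the codec config above, using
# plain arithmetic with the default hyper-parameters copied from the file: upsampling
# ratios (8, 5, 4, 2) give a hop length of 320 samples, so at 24 kHz the frame rate is
# ceil(24000 / 320) = 75, and the largest target bandwidth of 24.0 kbps yields
# int(1000 * 24.0 // (75 * 10)) = 32 quantizers.
import math

hop_length = 8 * 5 * 4 * 2                      # product of the upsampling ratios
frame_rate = math.ceil(24_000 / hop_length)     # 75 frames per second
num_quantizers = int(1000 * 24.0 // (frame_rate * 10))
assert (hop_length, frame_rate, num_quantizers) == (320, 75, 32)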
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    '''configuration_conditional_detr''': [
        '''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''ConditionalDetrConfig''',
        '''ConditionalDetrOnnxConfig''',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
    _import_structure['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 214 |
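# Hedged sketch of the lazy-import pattern the module above follows: exported symbols are
# only imported from their submodule on first attribute access. This simplified stand-in
# uses importlib directly rather than transformers' _LazyModule, so the class below is an
# illustrative assumption, not the library's implementation.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        submodule = self._symbol_to_module.get(name)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value

# Usage (for a real package): sys.modules[__name__] = LazyModule(__name__, import_structure)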
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class SwitchTransformersConfig (PretrainedConfig ):
    model_type = """switch_transformers"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
    def __init__( self , vocab_size=3_2128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        ) # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
| 214 | 1 |
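# Small numeric illustration of the sparse-layer bookkeeping in the config above: with 12
# layers and 3 sparse layers, every (12 // 3) = 4th block is a mixture-of-experts block;
# with 0 sparse layers the step equals the layer count so no block qualifies. The
# "(i + 1) % step" placement below is one common convention and an assumption here; the
# exact offset the model uses is not shown in this file.
def sparse_block_indices(num_layers: int, num_sparse_layers: int) -> list:
    step = num_layers // num_sparse_layers if num_sparse_layers > 0 else num_layers
    return [i for i in range(num_layers) if (i + 1) % step == 0 and num_sparse_layers > 0]

assert sparse_block_indices(12, 3) == [3, 7, 11]
assert sparse_block_indices(12, 0) == []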
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace ( s , old , new , occurrence ):
    parts = s.rsplit(old , occurrence )
    return new.join(parts )
def count_parameters ( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict ):
    upgrade = {}
    group_keys = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'{group_key}.' , f'{group_key}.group.' )
        if "res_path" in key:
            key = key.replace('''res_path.''' , '''res_path.path.''' )
        if key.endswith('''.w''' ):
            key = rreplace(key , '''.w''' , '''.weight''' , 1 )
        if key.endswith('''.b''' ):
            key = rreplace(key , '''.b''' , '''.bias''' , 1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_lowerCamelCase = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 177 |
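# Toy round trip for the key-renaming step of the conversion script above: suffixes `.w`
# and `.b` become `.weight` and `.bias`, and `res_path.` gains a `.path` segment. Uses a
# plain dict of floats instead of real tensors, so the keys are illustrative only.
def rename_key(key: str) -> str:
    if "res_path" in key:
        key = key.replace("res_path.", "res_path.path.")
    if key.endswith(".w"):
        key = key[: -len(".w")] + ".weight"
    if key.endswith(".b"):
        key = key[: -len(".b")] + ".bias"
    return key

sample = {"blocks.0.res_path.conv.w": 1.0, "blocks.0.res_path.conv.b": 0.5}
renamed = {rename_key(k): v for k, v in sample.items()}
assert set(renamed) == {"blocks.0.res_path.path.conv.weight", "blocks.0.res_path.path.conv.bias"}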
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'data2vec-text'
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ] )
| 177 | 1 |
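# Hedged sketch of what the `inputs` mapping above is consumed for: each entry names an
# input tensor and marks which of its axes are dynamic when exporting to ONNX. The
# commented export call is illustrative torch.onnx.export-style wiring, with hypothetical
# tensor names; only the loop below actually runs.
from collections import OrderedDict

dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

# torch.onnx.export(..., input_names=list(onnx_inputs),
#                   dynamic_axes={name: axes for name, axes in onnx_inputs.items()})
for name, axes in onnx_inputs.items():
    print(f"{name}: dynamic axes -> {axes}")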
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths ( top_dir = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def md_prefix ( i ):
    return f'{i * "  "}*' if i else "\n##"
def print_path ( old_path , new_path ):
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md ( top_dir = "." ):
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 106 |
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths ( top_dir = "." ) -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir ):
        dir_names[:] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename )[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path , filename ).lstrip('''./''' )
def md_prefix ( i ):
    return f'{i * "  "}*' if i else "\n##"
def print_path ( old_path , new_path ):
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'{md_prefix(i )} {new_part.replace("_" , " " ).title()}' )
    return new_path
def print_directory_md ( top_dir = "." ):
    old_path = ''''''
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'{filepath}/{filename}'.replace(''' ''' , '''%20''' )
        filename = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
        print(f'{md_prefix(indent )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 106 | 1 |
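# Tiny demonstration of the markdown prefix/indent logic from the directory lister above:
# depth 0 starts a new "##" section, deeper levels emit two-space-indented bullets.
def demo_md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"

assert demo_md_prefix(0) == "\n##"
assert demo_md_prefix(1) == "  *"
assert demo_md_prefix(2) == "    *"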
'''simple docstring'''
import math
def real_power ( apparent_power : float , power_factor : float ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * power_factor
def reactive_power ( apparent_power : float , power_factor : float ) -> float:
    '''simple docstring'''
    if (
        not isinstance(power_factor , (int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 |
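# Worked power-triangle example for the two functions above: with apparent power S and
# power factor pf = cos(phi), real power is S*pf and reactive power is S*sqrt(1 - pf^2),
# so P^2 + Q^2 == S^2. The numbers below are arbitrary.
import math

S, pf = 100.0, 0.8
P = S * pf                          # 80.0 W
Q = S * math.sqrt(1 - pf**2)        # 60.0 var
assert math.isclose(P**2 + Q**2, S**2)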
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"""do_clean_text""": False, """add_prefix_space""": False}
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
UpperCAmelCase__ = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
UpperCAmelCase__ = {"""unk_token""": """<unk>"""}
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , _UpperCAmelCase : Any ):
"""simple docstring"""
UpperCAmelCase__ = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
UpperCAmelCase__ = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : List[str] , _UpperCAmelCase : str ):
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = """こんにちは、世界。 こんばんは、㔺界。"""
UpperCAmelCase__ = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
UpperCAmelCase__ = tokens + [tokenizer.unk_token]
UpperCAmelCase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
UpperCAmelCase__ = """こんにちは、、、、世界。こんばんは、、、、世界。"""
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
UpperCAmelCase__ = """こんにちは、世界。"""
UpperCAmelCase__ = """こんばんは、㔺界。😀"""
UpperCAmelCase__ = """こんにちは、世界。こんばんは、世界。😀"""
UpperCAmelCase__ = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase__ = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , prefix_text=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
UpperCAmelCase__ = """こんにちは、世界。"""
UpperCAmelCase__ = """こんばんは、㔺界。😀"""
UpperCAmelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ = len(tokenizer.encode(_UpperCAmelCase ) ) - 2
UpperCAmelCase__ = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase__ = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase__ = tokenizer(_UpperCAmelCase , prefix_text=_UpperCAmelCase ).token_type_ids
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
UpperCAmelCase__ = tokenizer.encode("""あンいワ""" )
UpperCAmelCase__ = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
UpperCAmelCase__ = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , tokenizer.decode(_UpperCAmelCase ) )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
UpperCAmelCase__ = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
UpperCAmelCase__ = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.batch_encode_plus(_UpperCAmelCase , padding=_UpperCAmelCase )
# fmt: off
UpperCAmelCase__ = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
UpperCAmelCase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token.attention_mask , _UpperCAmelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCAmelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
| 61 | 1 |
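# Hedged sketch of the prefix-LM token_type_ids convention the tokenizer tests above
# assert: positions belonging to the prefix (plus the leading separator) get type 1 and
# the continuation gets type 0. A simplified stand-in, not the real tokenizer.
def prefix_lm_token_types(len_prefix: int, len_text: int) -> list:
    # 1 for the separator and every prefix token, 0 for the text and trailing position
    return [1] + [1] * len_prefix + [0] * (len_text + 1)

assert prefix_lm_token_types(0, 3) == [1, 0, 0, 0, 0]
assert prefix_lm_token_types(2, 3) == [1, 1, 1, 0, 0, 0, 0]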
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ['''XLA_PYTHON_CLIENT_MEM_FRACTION'''] = '0.12' # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape , vocab_size , rng=None ):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims ):
        values.append(rng.randint(0 , vocab_size - 1 ) )
    output = np.array(values , dtype=jnp.int32 ).reshape(shape )
    return output
def random_attention_mask ( shape , rng=None ):
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin :
    """simple docstring"""
    model_tester = None
    all_generative_model_classes = ()
def UpperCamelCase__ ( self : int ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_a = 2
_a = inputs["input_ids"].shape[-1] // 2
_a = inputs["input_ids"][:max_batch_size, :sequence_length]
_a = jnp.ones_like(__a )
_a = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_a = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_a = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase__ ( self : Tuple ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 0
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model_class.__name__[4:] # Skip the "Flax" at the beginning
_a = getattr(__a , __a )
_a = pt_model_class(__a ).eval()
_a = load_flax_weights_in_pytorch_model(__a , flax_model.params )
_a = flax_model.generate(__a ).sequences
_a = pt_model.generate(torch.tensor(__a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Any ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : int ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : int ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = False
_a = max_length
_a = 2
_a = 2
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = True
_a = max_length
_a = 0.8
_a = 10
_a = 0.3
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : int ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : List[Any] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
_a = max_length
_a = 2
_a = 1
_a = 8
_a = 9
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : List[str] ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = False
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : str ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = True
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase__ ( self : Any ):
_a , _a , _a , _a = self._get_input_ids_and_config()
# pad attention mask on the left
_a = attention_mask.at[(0, 0)].set(0 )
_a = 2
_a = max_length
for model_class in self.all_generative_model_classes:
_a = model_class(__a )
_a = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
_a = jit(model.generate )
_a = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests (unittest.TestCase ):
    """simple docstring"""
    def test_validate_generation_inputs ( self ):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str , return_tensors="np" ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , "do_samples" ):
            model.generate(input_ids , do_samples=True )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , "foo" ):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids , **fake_model_kwargs )
| 63 |
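# Hedged sketch of the jit-equivalence pattern the mixin above repeats: run a function
# eagerly and under jax.jit and assert identical outputs. A toy function stands in for
# model.generate so this runs without any checkpoint.
import jax
import jax.numpy as jnp

def greedy_step(logits):
    # pick the arg-max token per batch row, as a stand-in for one greedy decode step
    return jnp.argmax(logits, axis=-1)

logits = jnp.array([[0.1, 2.0, -1.0], [3.0, 0.0, 0.5]])
eager_out = greedy_step(logits)
jit_out = jax.jit(greedy_step)(logits)
assert eager_out.tolist() == jit_out.tolist()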
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class MMBTConfig :
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_herbert_fast'''] = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 271 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput ( BaseOutput ):
    '''simple docstring'''
    latent_dist : "DiagonalGaussianDistribution"
class AutoencoderKL ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__( self , in_channels : int = 3 , out_channels : int = 3 , down_block_types : Tuple[str] = ("DownEncoderBlock2D",) , up_block_types : Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels : Tuple[int] = (64,) , layers_per_block : int = 1 , act_fn : str = "silu" , latent_channels : int = 4 , norm_num_groups : int = 32 , sample_size : int = 32 , scaling_factor : float = 0.1_8215 , ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=True , )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , norm_num_groups=norm_num_groups , act_fn=act_fn , )
        self.quant_conv = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        self.post_quant_conv = nn.Conv2d(latent_channels , latent_channels , 1 )
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing ( self , module , value=False ):
        if isinstance(module , (Encoder, Decoder) ):
            module.gradient_checkpointing = value
    def enable_tiling ( self , use_tiling : bool = True ):
        self.use_tiling = use_tiling
    def disable_tiling ( self ):
        self.enable_tiling(False )
    def enable_slicing ( self ):
        self.use_slicing = True
    def disable_slicing ( self ):
        self.use_slicing = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors ( self ) -> Dict[str, AttentionProcessor]:
        processors = {}
        def fn_recursive_add_processors(name : str , module : torch.nn.Module , processors : Dict[str, AttentionProcessor] ):
            if hasattr(module , 'set_processor' ):
                processors[F'''{name}.processor'''] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F'''{name}.{sub_name}''' , child , processors )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(name , module , processors )
        return processors
    def set_attn_processor ( self , processor : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        count = len(self.attn_processors.keys() )
        if isinstance(processor , dict ) and len(processor ) != count:
            raise ValueError(
                F'''A dict of processors was passed, but the number of processors {len(processor )} does not match the'''
                F''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
        def fn_recursive_attn_processor(name : str , module : torch.nn.Module , processor ):
            if hasattr(module , 'set_processor' ):
                if not isinstance(processor , dict ):
                    module.set_processor(processor )
                else:
                    module.set_processor(processor.pop(F'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F'''{name}.{sub_name}''' , child , processor )
        for name, module in self.named_children():
            fn_recursive_attn_processor(name , module , processor )
    def set_default_attn_processor ( self ):
        self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
    def encode ( self , x : torch.FloatTensor , return_dict : bool = True ):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x , return_dict=return_dict )
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice ) for x_slice in x.split(1 )]
            h = torch.cat(encoded_slices )
        else:
            h = self.encoder(x )
        moments = self.quant_conv(h )
        posterior = DiagonalGaussianDistribution(moments )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior )
    def _decode ( self , z : torch.FloatTensor , return_dict : bool = True ):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z , return_dict=return_dict )
        z = self.post_quant_conv(z )
        dec = self.decoder(z )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
@apply_forward_hook
    def decode ( self , z : torch.FloatTensor , return_dict : bool = True ):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
            decoded = torch.cat(decoded_slices )
        else:
            decoded = self._decode(z ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded )
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
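    # A worked example of the crossfade implemented by blend_v / blend_h (added for
    # illustration, not part of the original class): with blend_extent = 4, row y of
    # the seam becomes
    #     b[y] = a[-4 + y] * (1 - y / 4) + b[y] * (y / 4)    for y = 0..3
    # so the weight on the previous tile falls 1.0, 0.75, 0.5, 0.25 while the weight
    # on the current tile rises 0.0, 0.25, 0.5, 0.75, a linear ramp that hides the
    # tile seam.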
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
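
# A minimal usage sketch for the tiled VAE above (kept as a comment so the module
# has no import-time side effects; the checkpoint name is only an example and the
# snippet assumes the standard diffusers entry points):
#
#     import torch
#     from diffusers import AutoencoderKL
#
#     vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
#     vae.enable_tiling()  # large inputs now go through tiled_encode / tiled_decode
#     image = torch.randn(1, 3, 1024, 1024)
#     latents = vae.encode(image).latent_dist.sample()
#     reconstruction = vae.decode(latents).sample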
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
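

# An iterative alternative added for illustration (a sketch, not part of the
# original solution): the same count via bottom-up dynamic programming, which
# avoids the deep recursion of the functions above.
def solution_dp(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[0] = 1: one way to make 0p (use no coins)
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]
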
if __name__ == "__main__":
print(solution(int(input().strip())))
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = XLMRobertaTokenizer
_snake_case = XLMRobertaTokenizerFast
_snake_case = True
_snake_case = True
def UpperCAmelCase ( self ) -> str:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case : Tuple = XLMRobertaTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self ) -> int:
snake_case : str = """<pad>"""
snake_case : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_0_0_2 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = XLMRobertaTokenizer(A , keep_accents=A )
snake_case : Tuple = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
snake_case : str = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
snake_case : Any = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def UpperCAmelCase ( self ) -> Optional[Any]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
snake_case : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case : Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
snake_case : int = self.tokenizer_class.from_pretrained(A , **A )
snake_case : Optional[int] = tempfile.mkdtemp()
snake_case : List[Any] = tokenizer_r.save_pretrained(A )
snake_case : Optional[Any] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
snake_case : Union[str, Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
snake_case : str = tokenizer_r.from_pretrained(A )
snake_case : Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
snake_case : List[Any] = tempfile.mkdtemp()
snake_case : Tuple = tokenizer_r.save_pretrained(A , legacy_format=A )
snake_case : Any = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
snake_case : int = tokenizer_r.from_pretrained(A )
snake_case : Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
snake_case : Tuple = tempfile.mkdtemp()
snake_case : str = tokenizer_r.save_pretrained(A , legacy_format=A )
snake_case : Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
snake_case : Dict = tokenizer_r.from_pretrained(A )
snake_case : List[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@cached_property
def UpperCAmelCase ( self ) -> str:
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def UpperCAmelCase ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(A , f.name )
snake_case : Any = XLMRobertaTokenizer(f.name , keep_accents=A )
snake_case : List[Any] = pickle.dumps(A )
pickle.loads(A )
def UpperCAmelCase ( self ) -> Any:
if not self.test_rust_tokenizer:
return
snake_case : str = self.get_tokenizer()
snake_case : List[str] = self.get_rust_tokenizer()
snake_case : str = """I was born in 92000, and this is falsé."""
snake_case : Optional[int] = tokenizer.tokenize(A )
snake_case : List[Any] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
snake_case : List[Any] = tokenizer.encode(A , add_special_tokens=A )
snake_case : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
snake_case : Optional[Any] = self.get_rust_tokenizer()
snake_case : str = tokenizer.encode(A )
snake_case : Dict = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Dict = """Hello World!"""
snake_case : Union[str, Any] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def UpperCAmelCase ( self ) -> str:
snake_case : int = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
snake_case : Tuple = [
0,
3_2_9_3,
8_3,
1_0,
4_5_5_2,
4_9_8_9,
7_9_8_6,
6_7_8,
1_0,
5_9_1_5,
1_1_1,
1_7_9_4_5_9,
1_2_4_8_5_0,
4,
6_0_4_4,
2_3_7,
1_2,
6,
5,
6,
4,
6_7_8_0,
7_0_5,
1_5,
1_3_8_8,
4_4,
3_7_8,
1_0_1_1_4,
7_1_1,
1_5_2,
2_0,
6,
5,
2_2_3_7_6,
6_4_2,
1_2_2_1,
1_5_1_9_0,
3_4_1_5_3,
4_5_0,
5_6_0_8,
9_5_9,
1_1_1_9,
5_7_7_0_2,
1_3_6,
1_8_6,
4_7,
1_0_9_8,
2_9_3_6_7,
4_7,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_0_4_4,
2_3_7,
6_2_8_4,
5_0_9_0_1,
5_2_8,
3_1,
9_0,
3_4,
9_2_7,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(A , self.big_tokenizer.encode(A ) )
@slow
def UpperCAmelCase ( self ) -> str:
# fmt: off
snake_case : Tuple = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
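

# Note on the `fairseq_offset` arithmetic used in the expected ids above (an
# explanatory sketch, not an additional test): fairseq reserves ids 0-3 for
# "<s>", "<pad>", "</s>" and "<unk>", so every raw SentencePiece id is shifted by
# tokenizer.fairseq_offset == 1 for regular pieces, roughly:
#
#     sp_id = tokenizer.sp_model.PieceToId("▁This")  # raw SentencePiece id
#     assert tokenizer.convert_tokens_to_ids("▁This") == sp_id + tokenizer.fairseq_offset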
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A__ : Union[str, Any] = logging.get_logger(__name__)
A__ : Optional[Any] = {'vocab_file': 'vocab.txt'}
A__ : Optional[int] = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
A__ : Tuple = {
'openbmb/cpm-ant-10b': 1_024,
}
def load_vocab(vocab_file):
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
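

# A quick illustration of the greedy longest-match-first loop above (a sketch, not
# part of the module): with a vocabulary containing "un", "aff", "able" and
# "unaff", the word "unaffable" is consumed left to right, always taking the
# longest substring found in the vocabulary:
#
#     WordpieceTokenizer(vocab={"un": 0, "aff": 1, "able": 2, "unaff": 3}).tokenize("unaffable")
#     # -> ["unaff", "able"]  (not ["un", "aff", "able"])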
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs, ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None):
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
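

# A minimal usage sketch (kept as a comment; it assumes the hub checkpoint below
# and a local jieba install, and is not part of the original module):
#
#     tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#     ids = tokenizer.encode("今天天气真好")  # jieba segmentation, then wordpiece
#     print(tokenizer.decode(ids))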
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
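
# How the indirection above behaves at runtime (an explanatory note, not part of
# the module): `_LazyModule` replaces this module in `sys.modules`, so a statement
# such as
#
#     from transformers.models.lilt import LiltModel
#
# only imports the heavy `modeling_lilt` submodule at attribute-access time. If
# torch is unavailable, the package still imports and only the configuration
# symbols are registered.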
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
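

# A quick cross-check added for illustration (a sketch, not part of the original
# file): trial division must agree with prime_sieve on small inputs.
def _is_prime_trial_division(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d != 0 for d in range(2, int(math.sqrt(n)) + 1))


def _check_sieve(limit: int = 100) -> None:
    assert prime_sieve(limit) == [n for n in range(2, limit + 1) if _is_prime_trial_division(n)]
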
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
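

# For reference, `update_from_string` exercised above parses comma-separated
# "key=value" pairs and casts each value to the type of the existing attribute
# (an explanatory sketch, not an additional test):
#
#     c = GPTaConfig()
#     c.update_from_string("n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index")
#     assert c.n_embd == 10 and c.resid_pdrop == 0.2 and c.scale_attn_weights is False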
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
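

# Behavior sketch for the helper above (illustration only): two initializers with
# identical type, shape and data but different names compare equal, because both
# names are blanked before the protobuf comparison:
#
#     t1 = onnx.helper.make_tensor("w1", onnx.TensorProto.FLOAT, [2], [1.0, 2.0])
#     t2 = onnx.helper.make_tensor("w2", onnx.TensorProto.FLOAT, [2], [1.0, 2.0])
#     assert _is_equal_tensor_proto(t1, t2)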
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
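

# Example entry point (a sketch assuming this utility is run as a script on an
# exported .onnx file; the argument handling is illustrative, not part of the
# original module):
if __name__ == "__main__":
    import sys

    optimized_path = remove_dup_initializers(sys.argv[1])
    print("saved deduplicated model to:", optimized_path)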
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config)
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
" attribute with a value from ['local', 'transient-global].")
# Encoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : int = flax_model.params["encoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        layer_name = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(_lowerCamelCase)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
lowercase__ : Tuple = txa_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowercase__ : List[Any] = txa_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
lowercase__ : Optional[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
UpperCamelCase = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = RoCBertTokenizer
_A = None
_A = False
_A = True
_A = filter_non_english
def _lowerCamelCase ( self :Optional[int] ) -> int:
super().setUp()
__UpperCamelCase : Tuple = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
__UpperCamelCase : Tuple = {}
__UpperCamelCase : List[str] = {}
for i, value in enumerate(a ):
__UpperCamelCase : Dict = i
__UpperCamelCase : Union[str, Any] = i
__UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_shape_file"] )
__UpperCamelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["word_pronunciation_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.word_shape_file , "w" , encoding="utf-8" ) as word_shape_writer:
json.dump(a , a , ensure_ascii=a )
with open(self.word_pronunciation_file , "w" , encoding="utf-8" ) as word_pronunciation_writer:
json.dump(a , a , ensure_ascii=a )
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
__UpperCamelCase : int = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase : Any = tokenizer.tokenize("你好[SEP]你是谁" )
self.assertListEqual(a , ["你", "好", "[SEP]", "你", "是", "谁"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(a ) , [5, 6, 2, 5, 7, 8] )
def _lowerCamelCase ( self :Optional[Any] ) -> List[str]:
__UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowerCamelCase ( self :str ) -> Union[str, Any]:
__UpperCamelCase : Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self :int ) -> Dict:
__UpperCamelCase : Any = RoCBertBasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowerCamelCase ( self :List[Any] ) -> Dict:
__UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self :Optional[Any] ) -> Optional[Any]:
__UpperCamelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self :int ) -> Dict:
__UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=a )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self :Optional[int] ) -> List[Any]:
__UpperCamelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self :int ) -> Optional[Any]:
__UpperCamelCase : int = RoCBertBasicTokenizer(do_lower_case=a , strip_accents=a )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
__UpperCamelCase : Optional[int] = RoCBertBasicTokenizer(do_lower_case=a , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def _lowerCamelCase ( self :Union[str, Any] ) -> List[Any]:
__UpperCamelCase : Tuple = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
__UpperCamelCase : List[Any] = {}
for i, token in enumerate(a ):
__UpperCamelCase : List[str] = i
__UpperCamelCase : List[Any] = RoCBertWordpieceTokenizer(vocab=a , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def _lowerCamelCase ( self :Optional[int] ) -> Dict:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def _lowerCamelCase ( self :Dict ) -> Optional[int]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def _lowerCamelCase ( self :Tuple ) -> List[str]:
__UpperCamelCase : Union[str, Any] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
if self.test_rust_tokenizer:
__UpperCamelCase : List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(a ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
def _lowerCamelCase ( self :Optional[Any] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : int = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase : int = tokenizer_r.encode_plus(
a , return_attention_mask=a , return_token_type_ids=a , return_offsets_mapping=a , add_special_tokens=a , )
__UpperCamelCase : Tuple = tokenizer_r.do_lower_case if hasattr(a , "do_lower_case" ) else False
__UpperCamelCase : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "Allen"),
((2_1, 2_3), "##NL"),
((2_3, 2_4), "##P"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), "allen"),
((2_1, 2_3), "##nl"),
((2_3, 2_4), "##p"),
((2_5, 3_3), "sentence"),
((3_3, 3_4), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def _lowerCamelCase ( self :int ) -> Optional[int]:
__UpperCamelCase : List[Any] = ["的", "人", "有"]
__UpperCamelCase : int = "".join(a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase : str = True
__UpperCamelCase : Tuple = self.tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : Optional[int] = tokenizer_p.encode(a , add_special_tokens=a )
__UpperCamelCase : Union[str, Any] = tokenizer_r.encode(a , add_special_tokens=a )
__UpperCamelCase : Dict = tokenizer_r.convert_ids_to_tokens(a )
__UpperCamelCase : Tuple = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a , a )
self.assertListEqual(a , a )
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Any = self.rust_tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : int = self.tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : str = tokenizer_r.encode(a , add_special_tokens=a )
__UpperCamelCase : Union[str, Any] = tokenizer_p.encode(a , add_special_tokens=a )
__UpperCamelCase : int = tokenizer_r.convert_ids_to_tokens(a )
__UpperCamelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(a )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase : Union[str, Any] = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(a )
]
self.assertListEqual(a , a )
self.assertListEqual(a , a )
@slow
def _lowerCamelCase ( self :List[Any] ) -> Optional[int]:
        tokenizer = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text = tokenizer.encode("你好" , add_special_tokens=False )
        text_a = tokenizer.encode("你是谁" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _lowerCamelCase ( self :List[str] ) -> Any:
__UpperCamelCase : List[str] = self.get_tokenizers(do_lower_case=a )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase : Union[str, Any] = "你好,你是谁"
__UpperCamelCase : str = tokenizer.tokenize(a )
__UpperCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(a )
__UpperCamelCase : int = tokenizer.convert_tokens_to_shape_ids(a )
__UpperCamelCase : Dict = tokenizer.convert_tokens_to_pronunciation_ids(a )
__UpperCamelCase : Tuple = tokenizer.prepare_for_model(
a , a , a , add_special_tokens=a )
__UpperCamelCase : List[str] = tokenizer.encode_plus(a , add_special_tokens=a )
                self.assertEqual(a , a )
| 232 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang):
'''simple docstring'''
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = F'{src_lang}-{tgt_lang}'
    readme = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True)
    path = os.path.join(model_card_dir , "README.md")
    print(F'Generating {path}')
    with open(path , "w" , encoding="utf-8") as f:
        f.write(readme)
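
# --- Illustrative sketch (not part of the original script): how one row of the score
# table in the card template is assembled. The hard-coded BLEU entry below is copied
# from the `scores` dict above; the helper name is made up for illustration.
def _demo_score_row(src_lang: str = "en", tgt_lang: str = "ru") -> str:
    scores = {"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"]}
    pair = f"{src_lang}-{tgt_lang}"
    # The template renders: "pair | fairseq | transformers"
    return f"{pair} | {scores[pair][0]} | {scores[pair][1]}"
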
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / 'model_cards'

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    model, src_lang, tgt_lang = model_name.split('-')
    model_card_dir = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 232 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values
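
# --- Quick sanity check for the helper above (illustrative only, not part of the
# original test file): `floats_list((2, 3))` returns a 2x3 nested list of floats in
# [0, scale), drawn from the module-level RNG.
if __debug__:
    _demo = floats_list((2, 3))
    assert len(_demo) == 2 and len(_demo[0]) == 3
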
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase ):
    def __init__(
        self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2_000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=16_000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self ):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance(self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = [np.asarray(lowercase_ ) for speech_input in speech_inputs]
# Test feature size
A__ = feature_extractor(lowercase_ , padding=lowercase_ , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
A__ = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
A__ = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test batched
A__ = feature_extractor(lowercase_ , return_tensors="np" ).input_features
A__ = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
A__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
A__ = np.asarray(lowercase_ )
A__ = feature_extractor(lowercase_ , return_tensors="np" ).input_features
A__ = feature_extractor(lowercase_ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowercase_ , lowercase_ ):
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def __A ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = ["longest", "max_length", "do_not_pad"]
A__ = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
A__ = feature_extractor(
lowercase_ , padding=lowercase_ , max_length=lowercase_ , return_attention_mask=lowercase_ )
A__ = inputs.input_features
A__ = inputs.attention_mask
A__ = [np.sum(lowercase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = ["longest", "max_length", "do_not_pad"]
A__ = [None, 16, None]
for max_length, padding in zip(lowercase_ , lowercase_ ):
A__ = feature_extractor(
lowercase_ , max_length=lowercase_ , padding=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ )
A__ = inputs.input_features
A__ = inputs.attention_mask
A__ = [np.sum(lowercase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
        self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def __A ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = feature_extractor(
lowercase_ , padding="max_length" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
A__ = inputs.input_features
A__ = inputs.attention_mask
A__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def __A ( self ):
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = feature_extractor(
lowercase_ , padding="longest" , max_length=4 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
A__ = inputs.input_features
A__ = inputs.attention_mask
A__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
A__ = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
A__ = feature_extractor(
lowercase_ , padding="longest" , max_length=16 , truncation=lowercase_ , return_tensors="np" , return_attention_mask=lowercase_ , )
A__ = inputs.input_features
A__ = inputs.attention_mask
A__ = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def __A ( self ):
import torch
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 , 32 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
A__ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
A__ = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self , num_samples ):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __A ( self ):
A__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
A__ = self._load_datasamples(1 )
A__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
A__ = feature_extractor(lowercase_ , return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , lowercase_ , atol=1e-4 ) )
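
# --- Illustrative sketch (not part of the test file): the zero-mean/unit-variance
# checks above target per-utterance cepstral mean and variance normalization (CMVN),
# which the feature extractor applies over the valid (non-padded) frames when
# `do_normalize=True`. A minimal NumPy version of that normalization:
import numpy as np  # re-import is harmless; kept so the sketch is self-contained


def _utterance_cmvn(features: np.ndarray, epsilon: float = 1e-5) -> np.ndarray:
    """Normalize a (num_frames, feature_size) matrix to zero mean / unit variance per dim."""
    mean = features.mean(axis=0)
    std = features.std(axis=0)
    return (features - mean) / (std + epsilon)
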
| 351 |
import datasets
from .evaluate import evaluate
UpperCAmelCase_ : List[Any] = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
UpperCAmelCase_ : Any = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
UpperCAmelCase_ : Tuple = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , )
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
A__ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
A__ = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
A__ = evaluate(dataset=UpperCAmelCase__ , predictions=UpperCAmelCase__ )
return score
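
# --- Illustrative sketch (not part of the metric file): the wrapped `evaluate` script
# scores each prediction with normalized exact match and token-overlap F1, roughly as
# below (helper names are made up for illustration):
import re
import string
from collections import Counter


def _normalize_answer(s: str) -> str:
    """Lowercase, strip punctuation/articles/extra whitespace (SQuAD-style)."""
    s = s.lower()
    s = "".join(ch for ch in s if ch not in set(string.punctuation))
    s = re.sub(r"\b(a|an|the)\b", " ", s)
    return " ".join(s.split())


def _token_f1(prediction: str, ground_truth: str) -> float:
    pred_tokens = _normalize_answer(prediction).split()
    gold_tokens = _normalize_answer(ground_truth).split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)
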
| 198 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str , num_meta4D_last_stage: int ) -> str:
    """Map a timm EfficientFormer state-dict key to its Hugging Face equivalent."""
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split('.' )

        if layer == "0":
            new_name = old_name.replace('0' , 'convolution1' )
        elif layer == "1":
            new_name = old_name.replace('1' , 'batchnorm_before' )
        elif layer == "3":
            new_name = old_name.replace('3' , 'convolution2' )
        else:
            new_name = old_name.replace('4' , 'batchnorm_after' )

    if "network" in old_name and re.search(R'\d\.\d' , old_name ):
        two_digit_num = R'\b\d{2}\b'
        if bool(re.search(two_digit_num , old_name ) ):
            match = re.search(R'\d\.\d\d.' , old_name ).group()
        else:
            match = re.search(R'\d\.\d.' , old_name ).group()
        if int(match[0] ) < 6:
            trimmed_name = old_name.replace(match , '' )
            trimmed_name = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
            new_name = 'intermediate_stages.' + trimmed_name
        else:
            trimmed_name = old_name.replace(match , '' )
            if int(match[2] ) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
            else:
                layer_index = str(int(match[2] ) - num_meta4D_last_stage )
                trimmed_name = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace('norm1' , 'layernorm1' )
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace('norm2' , 'layernorm2' )
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace('fc1' , 'linear_in' )
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace('fc2' , 'linear_out' )

            new_name = 'last_stage.' + trimmed_name

    elif "network" in old_name and re.search(R'.\d.' , old_name ):
        new_name = old_name.replace('network' , 'intermediate_stages' )

    if "fc" in new_name:
        new_name = new_name.replace('fc' , 'convolution' )
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace('norm1' , 'batchnorm_before' )
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace('norm2' , 'batchnorm_after' )
    if "proj" in new_name:
        new_name = new_name.replace('proj' , 'projection' )
    if "dist_head" in new_name:
        new_name = new_name.replace('dist_head' , 'distillation_classifier' )
    elif "head" in new_name:
        new_name = new_name.replace('head' , 'classifier' )
    elif "patch_embed" in new_name:
        new_name = 'efficientformer.' + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace('norm' , 'layernorm' )
        new_name = 'efficientformer.' + new_name
    else:
        new_name = 'efficientformer.encoder.' + new_name

    return new_name
def convert_torch_checkpoint(checkpoint , num_meta4D_last_stage ):
    """Rename every key of the checkpoint in place using `rename_key`."""
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key )
        checkpoint[rename_key(key , num_meta4D_last_stage )] = val

    return checkpoint
def prepare_img():
    """Download the standard COCO test image used for sanity checks."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path , efficientformer_config_file: Path , pytorch_dump_path: Path , push_to_hub: bool ):
    orig_state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file )
    model = EfficientFormerForImageClassificationWithTeacher(config )
    model_name = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict , num_meta4D_last_stage )

    model.load_state_dict(new_state_dict )
    model.eval()

    pillow_resamplings = {
        'bilinear': PILImageResampling.BILINEAR,
        'bicubic': PILImageResampling.BICUBIC,
        'nearest': PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
    pixel_values = processor(images=image , return_tensors='pt' ).pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size , interpolation=pillow_resamplings['bicubic'] ),
            CenterCrop(crop_size ),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN , IMAGENET_DEFAULT_STD ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )

    assert torch.allclose(original_pixel_values , pixel_values )

    outputs = model(pixel_values )
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.13_12, 0.43_53, -1.04_99, -0.51_24, 0.41_83, -0.67_93, -1.37_77, -0.08_93, -0.73_58, -2.43_28] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.31_50, -1.54_56, -1.25_56, -0.84_96, -0.71_27, -0.78_97, -0.97_28, -0.30_52, 0.37_51, -0.31_27] )
        assert torch.allclose(logits[0, :10] , expected_logits , atol=1E-3 )
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.02_83, -1.41_31, -0.56_44, -1.31_15, -0.57_85, -1.20_49, -0.75_28, 0.19_92, -0.38_22, -0.08_78] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )

    # Save Checkpoints
    Path(pytorch_dump_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_path )
    print(F"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
    processor.save_pretrained(pytorch_dump_path )
    print(F"""Processor successfully saved at {pytorch_dump_path}""" )

    if push_to_hub:
        print('Pushing model to the hub...' )

        model.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add model' , use_temp_dir=True , )
        processor.push_to_hub(
            repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
__A = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 90 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """Perform linear search over array[left:right]; return -1 if target is absent."""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    """Iterative ternary search over a sorted list; return -1 if target is absent."""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """Recursive ternary search over a sorted list; return -1 if target is absent."""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
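
# --- Illustrative helper (not part of the original module): each step keeps one of
# three sub-ranges, so the search needs O(log_{3/2} n) comparisons before falling back
# to the linear scan once the window is smaller than `precision`. The helper below just
# shows the two partition points for a concrete window.
def _demo_split(left: int = 0, right: int = 90) -> tuple[int, int]:
    one_third = (left + right) // 3 + 1
    two_third = 2 * (left + right) // 3 + 1
    return one_third, two_third  # (31, 61) for the defaults
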
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
else:
print("Not found")
| 90 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_a : str= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        lowercase : Any = Transformer2DModel(
            sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=a__ ,activation_fn="""gelu-approximate""" ,num_embeds_ada_norm=1000 ,norm_type="""ada_norm_zero""" ,norm_elementwise_affine=a__ ,)
lowercase : Optional[Any] = AutoencoderKL()
lowercase : Optional[Any] = DDIMScheduler()
lowercase : List[str] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=0 ):
'''simple docstring'''
if str(a__ ).startswith("""mps""" ):
lowercase : Optional[Any] = torch.manual_seed(a__ )
else:
lowercase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
lowercase : Optional[int] = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = """cpu"""
lowercase : str = self.get_dummy_components()
lowercase : List[str] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowercase : List[str] = self.get_dummy_inputs(a__ )
lowercase : Optional[Any] = pipe(**a__ ).images
lowercase : Optional[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
lowercase : str = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ ,1e-3 )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_inference_batch_single_identical(relax_max_difference=a__ ,expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
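
# --- Illustrative sketch (not part of the test file): the `expected_slice` pattern used
# above pins a tiny corner of the generated image and compares it with a max absolute
# difference. The zero arrays below are placeholders for illustration.
def _max_abs_diff_demo() -> float:
    import numpy as np

    image = np.zeros((1, 16, 16, 3))
    image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
    expected = np.zeros(9)
    return float(np.abs(image_slice.flatten() - expected).max())  # 0.0 here
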
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = torch.manual_seed(0 )
lowercase : Optional[int] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
lowercase : Optional[Any] = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
lowercase : Optional[Any] = pipe.get_label_ids(a__ )
lowercase : Tuple = pipe(a__ ,generator=a__ ,num_inference_steps=40 ,output_type="""np""" ).images
for word, image in zip(a__ ,a__ ):
lowercase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
lowercase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
lowercase : Tuple = ["""vase""", """umbrella"""]
lowercase : Dict = pipe.get_label_ids(a__ )
lowercase : Optional[Any] = torch.manual_seed(0 )
lowercase : Union[str, Any] = pipe(a__ ,generator=a__ ,num_inference_steps=25 ,output_type="""np""" ).images
for word, image in zip(a__ ,a__ ):
lowercase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 364 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def http_user_agent(user_agent: Union[Dict, str, None] = None ) -> str:
lowercase : str = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += f"; torch/{_torch_version}"
if is_flax_available():
ua += f"; jax/{_jax_version}"
ua += f"; flax/{_flax_version}"
if is_onnx_available():
ua += f"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get("""DIFFUSERS_IS_CI""" , """""" ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
return ua
def get_full_repo_name(model_id: str , organization: Optional[str] = None , token: Optional[str] = None ) -> str:
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )["""name"""]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args , model_name ):
if not is_jinja_available():
raise ValueError(
"""Modelcard rendering is based on Jinja templates."""
""" Please make sure to have `jinja` installed before using `create_model_card`."""
""" To install it, please run `pip install Jinja2`.""" )
if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""" ) and args.local_rank not in [-1, 0]:
return
lowercase : str = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""" ) else None
lowercase : int = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ )
    model_card = ModelCard.from_template(
        card_data=ModelCardData( # Card metadata object that will be converted to YAML block
            language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) ,
        template_path=MODEL_CARD_TEMPLATE_PATH ,
        model_name=model_name ,
        repo_name=repo_name ,
        dataset_name=args.dataset_name if hasattr(args , """dataset_name""" ) else None ,
        learning_rate=args.learning_rate ,
        train_batch_size=args.train_batch_size ,
        eval_batch_size=args.eval_batch_size ,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args , """gradient_accumulation_steps""" ) else None
        ) ,
        adam_beta1=args.adam_beta1 if hasattr(args , """adam_beta1""" ) else None ,
        adam_beta2=args.adam_beta2 if hasattr(args , """adam_beta2""" ) else None ,
        adam_weight_decay=args.adam_weight_decay if hasattr(args , """adam_weight_decay""" ) else None ,
        adam_epsilon=args.adam_epsilon if hasattr(args , """adam_epsilon""" ) else None ,
        lr_scheduler=args.lr_scheduler if hasattr(args , """lr_scheduler""" ) else None ,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args , """lr_warmup_steps""" ) else None ,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args , """ema_inv_gamma""" ) else None ,
        ema_power=args.ema_power if hasattr(args , """ema_power""" ) else None ,
        ema_max_decay=args.ema_max_decay if hasattr(args , """ema_max_decay""" ) else None ,
        mixed_precision=args.mixed_precision ,
    )
    card_path = os.path.join(args.output_dir , """README.md""" )
    model_card.save(card_path )
def extract_commit_hash(resolved_file: Optional[str] , commit_hash: Optional[str] = None ):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R"""snapshots/([^/]+)/""" , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
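
# --- Illustrative usage (not part of the original module; the path and sha are made up):
def _demo_extract_commit_hash() -> None:
    sha = "0123456789abcdef0123456789abcdef01234567"  # hypothetical 40-hex commit id
    fake_path = f"models--org--repo/snapshots/{sha}/model.bin"
    # Returns the snapshot segment only when it is a full 40-character hex sha.
    assert extract_commit_hash(fake_path) == sha
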
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
def move_cache(old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
"""Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
lowercase : Dict = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
lowercase : Any = 0
else:
with open(cache_version_file) as f:
try:
lowercase : List[Any] = int(f.read())
except ValueError:
lowercase : int = 0
if cache_version < 1:
lowercase : Union[str, Any] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowercase : int = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str , variant: Optional[str] = None ) -> str:
    if variant is not None:
        splits = weights_name.split(""".""" )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = """.""".join(splits )

    return weights_name
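
# --- Illustrative usage (not part of the original module): the variant is spliced in
# just before the file extension.
def _add_variant_examples() -> None:
    assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
    assert _add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"
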
def _get_model_file(
    pretrained_model_name_or_path , * , weights_name , subfolder , cache_dir , force_download , proxies , resume_download , local_files_only , use_auth_token , user_agent , revision , commit_hash=None , ):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path )
    if os.path.isfile(pretrained_model_name_or_path ):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path ):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path , weights_name ) ):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path , weights_name )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path , subfolder , weights_name ) ):
            model_file = os.path.join(pretrained_model_name_or_path , subfolder , weights_name )
            return model_file
else:
raise EnvironmentError(
f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__ ).base_version ) >= version.parse("""0.20.0""" )
):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path , filename=_add_variant(weights_name , revision ) , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , FutureWarning , )
                return model_file
            except: # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name , revision )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name , revision )}' so that the correct variant file can be added." , FutureWarning , )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path , filename=weights_name , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , local_files_only=local_files_only , use_auth_token=use_auth_token , user_agent=user_agent , subfolder=subfolder , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
"""listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
"""this model name. Check the model page at """
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
"""'https://huggingface.co/models', make sure you don't have a local directory with the same name. """
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 285 | 0 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin ):
    feature_extractor_class = """EncodecFeatureExtractor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any])-> str:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: int = self.feature_extractor
__lowerCAmelCase: Union[str, Any] = False
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Tuple=True)-> List[str]:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__)
def __call__( self : Dict , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str])-> Any:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = kwargs.pop("audio" , UpperCamelCase__)
__lowerCAmelCase: Optional[int] = kwargs.pop("sampling_rate" , UpperCamelCase__)
__lowerCAmelCase: Optional[int] = kwargs.pop("text" , UpperCamelCase__)
if len(UpperCamelCase__) > 0:
__lowerCAmelCase: Union[str, Any] = args[0]
__lowerCAmelCase: Any = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if text is not None:
__lowerCAmelCase: List[str] = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__)
if audio is not None:
__lowerCAmelCase: List[Any] = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__lowerCAmelCase: List[Any] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
__lowerCAmelCase: Any = audio_inputs["padding_mask"]
return inputs
def lowercase_ ( self : Optional[Any] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Tuple = kwargs.pop("audio" , UpperCamelCase__)
__lowerCAmelCase: Tuple = kwargs.pop("padding_mask" , UpperCamelCase__)
if len(UpperCamelCase__) > 0:
__lowerCAmelCase: Optional[Any] = args[0]
__lowerCAmelCase: str = args[1:]
if audio_values is not None:
return self._decode_audio(UpperCamelCase__ , padding_mask=UpperCamelCase__)
else:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : Union[str, Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Tuple)-> List[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__)
def lowercase_ ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional = None)-> List[np.ndarray]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = to_numpy(UpperCamelCase__)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: Optional[Any] = audio_values.shape
if padding_mask is None:
return list(UpperCamelCase__)
__lowerCAmelCase: int = to_numpy(UpperCamelCase__)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__lowerCAmelCase: Tuple = seq_len - padding_mask.shape[-1]
__lowerCAmelCase: Dict = 1 - self.feature_extractor.padding_value
__lowerCAmelCase: Optional[int] = np.pad(UpperCamelCase__ , ((0, 0), (0, difference)) , "constant" , constant_values=UpperCamelCase__)
__lowerCAmelCase: Any = audio_values.tolist()
for i in range(UpperCamelCase__):
__lowerCAmelCase: Union[str, Any] = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__lowerCAmelCase: Optional[Any] = sliced_audio.reshape(UpperCamelCase__ , -1)
return audio_values
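
# --- Illustrative sketch (not part of the processor file): the pad-then-slice trick in
# `_decode_audio`, in miniature. The padding mask is extended with the *non*-padding
# value so freshly generated frames are kept, then each sequence is sliced back to its
# true length. Shapes here are simplified to (batch, seq_len); names are made up.
import numpy as np


def _demo_strip_padding(audio: np.ndarray, padding_mask: np.ndarray, padding_value: float = 0.0):
    difference = audio.shape[-1] - padding_mask.shape[-1]  # assumes audio is at least as long
    keep_value = 1 - padding_value  # opposite of the padding token
    padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=keep_value)
    return [seq[mask != padding_value] for seq, mask in zip(audio, padding_mask)]
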
| 217 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase ):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = """tokenizer_file"""
    special_tokens_map = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""}
def lowercase_ ( self : List[Any])-> Dict:
'''simple docstring'''
super().setUp()
__lowerCAmelCase: Optional[Any] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
tokenizer.save_pretrained(self.tmpdirname)
def lowercase_ ( self : List[Any] , **UpperCamelCase__ : Union[str, Any])-> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__)
def lowercase_ ( self : Union[str, Any])-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = self.get_rust_tokenizer()
__lowerCAmelCase: int = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
__lowerCAmelCase: List[str] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
__lowerCAmelCase: List[str] = tokenizer.batch_encode_plus(UpperCamelCase__)["input_ids"]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
__lowerCAmelCase: List[Any] = tokenizer.batch_decode(UpperCamelCase__)
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple=6)-> Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
__lowerCAmelCase: Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__lowerCAmelCase: Dict = "This is a simple input"
__lowerCAmelCase: str = ["This is a simple input 1", "This is a simple input 2"]
__lowerCAmelCase: int = ("This is a simple input", "This is a pair")
__lowerCAmelCase: Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__)
tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__)
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding")
                tokenizer_r.pad_token = None # Hotfixing padding = None
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Simple input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length")
# Pair input
self.assertRaises(
UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , )
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Dict = self.get_rust_tokenizer()
__lowerCAmelCase: List[str] = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCamelCase__)
__lowerCAmelCase: Union[str, Any] = next(iter(UpperCamelCase__))["premise"] # pick up one data
__lowerCAmelCase: Any = list(sample_data.values())
__lowerCAmelCase: int = list(map(tokenizer.encode , UpperCamelCase__))
__lowerCAmelCase: str = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) for x in output_tokens]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 217 | 1 |
MOD_ADLER = 6_55_21


def adler32(plain_text: str) -> int:
    """
    Implements the Adler-32 checksum: iterate over the characters,
    updating the two running sums modulo MOD_ADLER.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
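# --- Added self-check (not in the original file). zlib.adler32 is in the
# standard library and also starts from 1, so the two implementations agree
# on ASCII input:
if __name__ == "__main__":
    import zlib

    sample = "Hello, world!"
    assert adler32(sample) == zlib.adler32(sample.encode("utf-8"))
    print(f"adler32({sample!r}) = {adler32(sample):#010x}")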
| 367 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules a model executes during a forward pass."""

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [handle.remove() for handle in self.handles]
        return self
@property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies weights from ``src`` into ``dest`` by matching traced operations in order."""

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 2_24, 2_24))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True, )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
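# --- Added usage note (hedged; the script filename is illustrative):
#
#   python convert_resnet_to_pytorch.py \
#       --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted
#
# Caveat: `--push_to_hub` is declared with `type=bool`, and argparse converts
# any non-empty string (including "False") to True, so the flag cannot be
# reliably disabled from the command line; change the default instead.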
| 132 | 0 |
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 10_00 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.' )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
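# --- Added usage sketch (not part of the original file). By hand,
# 100 = 10**2 = 6**2 + 8**2 = 1**2 + 3**2 + 4**2 + 5**2 + 7**2, so there are
# exactly three ways to write 100 as a sum of distinct squares:
def _demo_power_sum():
    assert solve(100, 2) == 3
    print("solve(100, 2) =", solve(100, 2))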
| 251 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ = "▁"
SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"}
SCREAMING_SNAKE_CASE__ = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
SCREAMING_SNAKE_CASE__ = {
"google/pegasus-xsum": 512,
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'additional_special_tokens should be of type {type(list)}, but is'
                    f' {type(additional_special_tokens)}' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'<unk_{i}>' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    """Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
                    f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'<unk_{i}>' for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
# add special tokens to encoder dict
        self.encoder = {
            0: self.pad_token,
            1: self.eos_token,
        }
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
        self.decoder = {v: k for k, v in self.encoder.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
# for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file )
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
return sp_id + self.offset
    def _convert_id_to_token(self, index: int) -> str:
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
            token = self.sp_model.IdToPiece(index - self.offset)
return token
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
return out_string.strip()
    def num_special_tokens_to_add(self, pair=False) -> int:
        return 1

    def _special_token_mask(self, seq: List) -> List[int]:
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
return (out_vocab_file,)
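# --- Added usage sketch (hedged; not part of the original module). Round
# trip with the released checkpoint; ids 0..offset-1 are reserved for the
# special tokens wired up in `self.encoder` above:
def _demo_pegasus_tokenizer():
    tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
    ids = tok("The quick brown fox.").input_ids
    print(ids, "->", tok.decode(ids, skip_special_tokens=True))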
| 46 | 0 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.0_2,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
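# --- Added usage sketch (hedged; values are illustrative only):
def _demo_autoformer_config():
    cfg = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    print(cfg.model_type, cfg.context_length, cfg.feature_size)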
| 367 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel PyTorch model instance to be converted
        ckpt_dir: TensorFlow model directory
        model_name: model name
    """
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"""bert/{name}"""

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"""Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}""")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))
def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path), cache_dir=args.cache_dir, )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
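# --- Added usage note (hedged; paths are illustrative). The script targets
# the TF1-style tf.get_variable / tf.Session APIs, so it needs TensorFlow 1.x
# (or tf.compat.v1 aliased as tf):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./bert-base-uncased/pytorch_model.bin \
#       --tf_cache_dir ./tf-ckpt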
| 7 | 0 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Returns a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    url = MODELS_MAP[model_name]['''url''']
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("""skip"""):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""])
    # name has to be of format main.{digit}
    if not name.startswith("""main."""):
        raise ValueError(f'ResConvBlock error with {name}')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f'Attn error with {name}')
def rename(input_string, max_depth=13):
    string = input_string
    if string.split(""".""")[0] == "timestep_embed":
        return string.replace("""timestep_embed""", """time_proj""")
    depth = 0
    if string.startswith("""net.3."""):
        depth += 1
        string = string[6:]
    elif string.startswith("""net."""):
        string = string[4:]
    while string.startswith("""main.7."""):
        depth += 1
        string = string[7:]
    if string.startswith("""main."""):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = """mid_block"""
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num) > 3 else """down_blocks.0"""
    if not string_left.startswith("""."""):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.')
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + """.""" + new_layer + """.""" + string_left
    else:
        new_string = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel"""):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
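# --- Added illustration (hedged; not part of the original script). The qkv
# branch above splits one fused [3*d, ...] conv projection into three equal
# chunks; a quick shape check with a dummy tensor:
def _demo_qkv_split():
    fused = torch.randn(3 * 8, 16, 1)  # fused qkv conv weight, kernel size 1
    out = transform_conv_attns({}, ["q.weight", "k.weight", "v.weight"], fused)
    assert all(t.shape == (8, 16) for t in out.values())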
def main(args):
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
    model_name = args.model_path.split("""/""")[-1].split(""".""")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name)
    sample_rate = MODELS_MAP[model_name]["""sample_rate"""]
    sample_size = MODELS_MAP[model_name]["""sample_size"""]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["""state_dict"""])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("""kernel""") for k in list(diffusers_minus_renamed)), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)
    steps = 1_00
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)
    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)
    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)
    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)
    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)
    print("""Diff sum""", diff_sum)
    print("""Diff max""", diff_max)
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    main(args)
| 206 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("""distilbert-base-uncased""")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4))
| 206 | 1 |
"""simple docstring"""
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
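# --- Added usage sketch (not part of the original module). Seeding makes
# the random draw reproducible:
def _demo_random_graph(seed: int = 0) -> None:
    random.seed(seed)
    for node, neighbors in random_graph(5, 0.5).items():
        print(node, "->", neighbors)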
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 350 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 209 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping(self) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 316 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__: List[str] = logging.get_logger(__name__)
A__: Union[str, Any] = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__(
        self,
        vocab_size=3_0_5_2_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 276 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case_ : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
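# --- Added usage sketch (hedged; not part of the original module). End-to-end
# preprocessing of a dummy RGB array; output is channels-first:
def _demo_image_processor():
    processor = CLIPImageProcessor()
    dummy = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
    batch = processor(images=dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224)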
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_mask2former': [
        'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Mask2FormerConfig',
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
        'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Mask2FormerForUniversalSegmentation',
        'Mask2FormerModel',
        'Mask2FormerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 236 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # A constraint must not be a complete subset of another constraint.
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
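# --- Added usage sketch (hedged; not part of the original tests). The
# constraint is fulfilled as soon as any one candidate sequence is produced:
def _demo_disjunctive_constraint():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        dc.update(token)
    print("completed:", dc.completed)  # True: [1, 2, 4] is an allowed branch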
| 102 |
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of a sentence or word."""
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
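# --- Added usage examples (not part of the original module):
def _demo_capitalize() -> None:
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 abc") == "123 abc"  # non-letters are left alone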
| 140 | 0 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE is already in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f'''{f}.py''' for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    """Check that the current Python environment contains all libraries imported in a file."""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)

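# Illustrative example (package name hypothetical): if the downloaded file contains
# `import some_missing_lib` and that package is absent, check_imports raises an
# ImportError suggesting `pip install some_missing_lib` up front, instead of letting the
# dynamic import fail later with a less actionable error.
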
def get_class_in_module(class_name, module_path):
    """Import a module from the dynamic modules cache and extract a class from it."""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)

def find_pipeline_class(loaded_module):
    """Retrieve the unique pipeline class in `loaded_module` that inherits from
    `DiffusionPipeline`, excluding `DiffusionPipeline` itself and classes defined in the
    `diffusers` package."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class

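# Sketch of a module that find_pipeline_class would accept (class and body hypothetical):
#
#     from diffusers import DiffusionPipeline
#
#     class MyCommunityPipeline(DiffusionPipeline):  # the single non-diffusers subclass
#         def __call__(self, *args, **kwargs):
#             ...
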
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Download a module file from a local folder, the GitHub community pipelines, or a Hub
    repo, place it in the dynamic modules cache, and return its path inside that cache."""
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve the GitHub version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification, but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)

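# Usage sketch (repo ids hypothetical): a bare name with no "/" is resolved against the
# GitHub community pipelines, while a "user/repo" id is resolved against the Hub:
#
#     get_cached_module_file("clip_guided_stable_diffusion", "pipeline.py")
#     get_cached_module_file("some-user/some-repo", "my_pipeline.py")
#
# Both return a module path under the dynamic modules cache, ready to be imported.
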
def get_class_from_dynamic_module(
    pretrained_model_name_or_path, module_file, class_name=None, cache_dir=None, force_download=False,
    resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    """Extract a class from a module file, present in the local folder or repository of a model."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
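
# Usage sketch (mirrors how DiffusionPipeline.from_pretrained loads custom pipelines; the
# pipeline name below is hypothetical):
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "clip_guided_stable_diffusion",  # community pipeline file on GitHub
#         "pipeline.py",
#         class_name=None,  # auto-detected via find_pipeline_class
#     )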
| 161 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
    import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2D shape, scaled by `scale`."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values

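# Illustrative example: floats_list((2, 3), scale=2.0) returns a 2x3 nested list of floats
# drawn uniformly from [0, 2), using the module-level global_rng unless an explicit rng is
# passed.
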
class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
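        # Note on the shape assertion: the AST feature extractor pads/truncates each clip to
        # 1024 frames of 128 mel bins by default, hence (1, 1024, 128) for a single input.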
| 161 | 1 |