| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81 to 54k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 to 1) |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """Returns fixed sinusoidal embeddings (Tensor2Tensor-style) for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""
    Time step Embedding Module: learns a projection of the sinusoidal time step embeddings.

    Args:
        time_embed_dim (`int`, defaults to `32`): dimension of the output embedding
        dtype (`jnp.dtype`, defaults to `jnp.float32`): parameter dtype
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""Wrapper Module for fixed sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
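# Usage sketch (not from the original file): exercising the modules above with
# toy shapes; the PRNG key and dimensions are illustrative assumptions.
import jax

example_timesteps = jnp.arange(4, dtype=jnp.float32)
example_emb = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)
assert example_emb.shape == (4, 32)  # one 32-dim embedding per timestep

proj = FlaxTimestepEmbedding(time_embed_dim=128)
params = proj.init(jax.random.PRNGKey(0), example_emb)  # initialise the two Dense layers
example_temb = proj.apply(params, example_emb)  # -> shape (4, 128)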
| code_codestyle: 714 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each prompt is yielded `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task, sharded over devices via accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
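# Toy run of remove_last_block (no model needed): the trailing top-level
# statement after the function body is stripped from a generated completion.
example_generation = "def add(a, b):\n    return a + b\n\nprint(add(1, 2))"
print(remove_last_block(example_generation))
# def add(a, b):
#     return a + b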
| style_context_codestyle: 662 | label: 0 |
from __future__ import absolute_import, division, print_function, unicode_literals

from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
    ROBERTA_INPUTS_DOCSTRING,
    ROBERTA_START_DOCSTRING,
    RobertaEmbeddings,
)

from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy


@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []

        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # exclude the final highway, of course
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
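# Sketch (assumption): `entropy` imported above is taken to be the Shannon
# entropy of the softmax distribution over logits, the confidence signal that
# decides whether an early highway exit fires.
import torch


def entropy_sketch(logits: torch.Tensor) -> torch.Tensor:
    """Low entropy = confident prediction = safe to exit early."""
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs)).sum(dim=-1)


print(entropy_sketch(torch.tensor([[10.0, -10.0]])))  # ~0.0 -> exit early
print(entropy_sketch(torch.tensor([[0.0, 0.0]])))     # ln(2) ~ 0.693 -> keep going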
| code_codestyle: 715 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass  # no fast tokenizer to import; mirrors the runtime branch above
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
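# Sketch (assumption, not the shipped implementation): the _LazyModule used
# above defers submodule imports until first attribute access, roughly so:
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
        return getattr(module, attr)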
| style_context_codestyle: 662 | label: 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'distilbert-base-uncased': 512,
'distilbert-base-uncased-distilled-squad': 512,
'distilbert-base-cased': 512,
'distilbert-base-cased-distilled-squad': 512,
'distilbert-base-german-cased': 512,
'distilbert-base-multilingual-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
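# Usage sketch of the two helpers above (requires network access to fetch the
# checkpoint; the token IDs depend on the vocab, so only structure is asserted):
tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
ids_a = tok.convert_tokens_to_ids(["hello", "world"])
ids_b = tok.convert_tokens_to_ids(["again"])
pair = tok.build_inputs_with_special_tokens(ids_a, ids_b)  # [CLS] a [SEP] b [SEP]
segments = tok.create_token_type_ids_from_sequences(ids_a, ids_b)
assert segments == [0, 0, 0, 0, 1, 1]  # second segment marked with 1s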
| code_codestyle: 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # attribute names below follow TokenizerTesterMixin's conventions
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
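# Sketch of the byte-level BPE convention these tests probe (requires network
# access to fetch the checkpoint): "Ġ" marks a token preceded by a space, and
# add_prefix_space controls whether the very first token also gets one.
from transformers import LongformerTokenizerFast

tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
print(tok.tokenize("hello hello"))  # ['hello', 'Ġhello']

tok_prefixed = LongformerTokenizerFast.from_pretrained(
    "allenai/longformer-base-4096", add_prefix_space=True
)
print(tok_prefixed.tokenize("hello hello"))  # ['Ġhello', 'Ġhello']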
| style_context_codestyle: 662 | label: 0 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count ordered sequences drawn from `array` that sum to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoization over the same recursion."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation in O(n * target) time."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
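# Agreement check (illustrative): all three strategies count ordered sequences;
# the tabulation for array=[1, 2, 5], target=5 is dp = [1, 1, 2, 3, 5, 9].
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9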
| code_codestyle: 717 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| style_context_codestyle: 662 | label: 0 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply the rules of the game to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
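# Spot-check of the B3/S23 rules in __judge_point; the 3x3 slice passed in
# includes the focus point itself, which the function discounts.
live_block = [[True, True, False], [True, True, False], [False, False, False]]
assert __judge_point(True, live_block)  # centre has 3 live neighbours -> survives

lonely = [[False, False, False], [False, True, False], [False, False, False]]
assert not __judge_point(True, lonely)  # 0 live neighbours -> dies of underpopulation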
| code_codestyle: 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 662 | label: 0 |
from ..utils import DummyObject, requires_backends
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["""sentencepiece"""]
def __init__( self : Any , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Tuple ) -> Any:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : Any , *__lowerCamelCase : Tuple , **__lowerCamelCase : Tuple ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["""sentencepiece"""]
def __init__( self : List[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["""sentencepiece"""]
def __init__( self : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : Dict ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : List[Any] ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : List[str] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Dict ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : str , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["""sentencepiece"""]
def __init__( self : Optional[int] , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : int ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Dict , **__lowerCamelCase : str ) -> List[str]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["""sentencepiece"""]
def __init__( self : Any , *__lowerCamelCase : Any , **__lowerCamelCase : List[str] ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *__lowerCamelCase : Dict , **__lowerCamelCase : str ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Optional[int] ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : Any , **__lowerCamelCase : str ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""sentencepiece"""]
def __init__( self : int , *__lowerCamelCase : Optional[int] , **__lowerCamelCase : Any ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = ["""sentencepiece"""]
def __init__( self : str , *__lowerCamelCase : int , **__lowerCamelCase : Tuple ) -> Optional[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : List[str] , **__lowerCamelCase : str ) -> Union[str, Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""sentencepiece"""]
def __init__( self : Optional[Any] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict ) -> Any:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Union[str, Any] ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ["""sentencepiece"""]
def __init__( self : str , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ["""sentencepiece"""]
def __init__( self : Dict , *__lowerCamelCase : Any , **__lowerCamelCase : List[Any] ) -> List[Any]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = ["""sentencepiece"""]
def __init__( self : List[Any] , *__lowerCamelCase : str , **__lowerCamelCase : Optional[int] ) -> int:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> int:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["""sentencepiece"""]
def __init__( self : Union[str, Any] , *__lowerCamelCase : int , **__lowerCamelCase : int ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ["""sentencepiece"""]
def __init__( self : List[str] , *__lowerCamelCase : Tuple , **__lowerCamelCase : Optional[Any] ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = ["""sentencepiece"""]
def __init__( self : Any , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : int ) -> Optional[int]:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = ["""sentencepiece"""]
def __init__( self : str , *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> Dict:
requires_backends(self , ["sentencepiece"] )
class snake_case__ (metaclass=_UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = ["""sentencepiece"""]
def __init__( self : Optional[int] , *__lowerCamelCase : Dict , **__lowerCamelCase : int ) -> Tuple:
requires_backends(self , ["sentencepiece"] )
| code_codestyle: 719 |
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
):
    """Returns fixed sinusoidal embeddings (Tensor2Tensor-style) for the given timesteps."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""Time step Embedding Module: learns a projection of the sinusoidal embeddings."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""Wrapper Module for fixed sinusoidal time step embeddings."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| style_context_codestyle: 662 | label: 0 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def __UpperCAmelCase ( self : Tuple ) -> Optional[int]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : int ) -> List[str]:
a = "sgugger/tiny-distilbert-classification"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , only_pretrain_model=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Any ) -> int:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a = "sshleifer/tiny-gpt2"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , [config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self : Dict ) -> Dict:
a = "patrickvonplaten/t5-tiny-random"
a = AutoConfig.from_pretrained(__lowerCamelCase )
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase , configs=[config] )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a = "sshleifer/tiny-gpt2"
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__lowerCamelCase , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self : List[str] ) -> List[str]:
a = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , save_to_csv=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__lowerCamelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__lowerCamelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__lowerCamelCase , "env.csv" ) , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "env.csv" ) ).exists() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
a = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__lowerCamelCase : Optional[int] ):
self.assertTrue(hasattr(__lowerCamelCase , "sequential" ) )
self.assertTrue(hasattr(__lowerCamelCase , "cumulative" ) )
self.assertTrue(hasattr(__lowerCamelCase , "current" ) )
self.assertTrue(hasattr(__lowerCamelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__lowerCamelCase , "log.txt" ) , log_print=__lowerCamelCase , trace_memory_line_by_line=__lowerCamelCase , eager_mode=__lowerCamelCase , multi_process=__lowerCamelCase , )
a = TensorFlowBenchmark(__lowerCamelCase )
a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__lowerCamelCase , "log.txt" ) ).exists() )
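# Usage sketch of the benchmark API exercised above (requires TensorFlow and
# network access; the tiny checkpoint is only for speed):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)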
| code_codestyle: 720 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        image_processor_map = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map, fp)
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Optional[int] ) -> str:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : List[str] , **__lowerCamelCase : Optional[int] ) -> Tuple:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 662 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 721 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname: str):
    """Extract the class label from a file name of the form `<label>_<index>.jpg`."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
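# For example (illustrative path, not taken from any real dataset):
#   extract_label("images/shiba_inu_123.jpg")  ->  "shiba_inu"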
class PetsDataset(Dataset):
    """Dataset mapping pet-image file names to (image, label) pairs."""

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
# Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
# For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
# We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
# Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
# Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
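# Hypothetical invocation (script name, paths and flag values are examples,
# not taken from the repository):
#
#   accelerate launch cv_example.py --data_dir images/ --mixed_precision fp16 \
#       --checkpointing_steps epoch --output_dir checkpoints --with_tracking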
| 662 | 0 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
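# Sketch of how the cutoffs above partition the vocabulary (illustrative
# numbers, not real model sizes): with vocab_size=10 and cutoffs=[4, 7],
# self.cutoffs == [4, 7, 10] and cutoff_ends == [0, 4, 7, 10], so the head
# scores tokens 0-3 directly plus one cluster logit per tail, while tokens
# 4-6 and 7-9 are scored by the smaller tail projections (shrunk by div_val).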
| 663 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Adjust contrast: level > 0 increases it, level < 0 decreases it."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
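# Quick check of the formula above: at level == 0 the factor is
# 259 * 255 / (255 * 259) == 1.0, i.e. the identity mapping, and c == 128 is a
# fixed point for every level; at level == 170 the factor is ~4.85, pushing
# pixel values hard away from the midpoint toward 0 and 255.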
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 1 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, x_end: float, step_size: float
) -> np.ndarray:
    """Approximate the solution of y' = ode_func(x, y) with y(x0) = y0 on [x0, x_end]."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
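# Minimal usage sketch (illustrative, not part of the original file): integrate
# dy/dx = y on [0, 1] with y(0) = 1. The exact value is y(1) = e ~ 2.71828 and
# explicit Euler approaches it from below as the step size shrinks:
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 1.0, 0.001)
#   print(ys[-1])  # ~ 2.7169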
| 663 |
"""simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    """A queue with three fixed priority levels; a lower number means higher priority."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """A queue where the smallest element always has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
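# Expected behaviour of the demos above (a sketch, not captured output): the
# fixed-priority demo drains priority 0 first (10, 100, 128), then priority 1
# (70, 7, 64), then priority 2 (1, 5, 4), while the element demo always returns
# the current minimum (1, 4, 5, 7, 10, ...). Both demos enqueue 9 items but
# call dequeue() 11 times, so each run ends by raising UnderFlowError.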
| 663 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
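# Minimal usage sketch (assumed, following the standard `transformers` config
# pattern):
#
#   config = RealmConfig()                      # REALM-base style defaults
#   config = RealmConfig(num_hidden_layers=2)   # or override any field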
| 663 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]
    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
@require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
    def test_small_model_tf(self):
        pass
| 663 | 1 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
_lowerCAmelCase : Union[str, Any] = (
F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , a__ , standard_warn=a__ )
_lowerCAmelCase : str = dict(scheduler.config )
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : List[Any] = FrozenDict(a__ )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
_lowerCAmelCase : Dict = (
F"The configuration file of this scheduler: {scheduler} has not set the configuration"
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , a__ , standard_warn=a__ )
_lowerCAmelCase : Dict = dict(scheduler.config )
_lowerCAmelCase : Any = True
_lowerCAmelCase : Any = FrozenDict(a__ )
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=a__ , segmentation_processor=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , unet=a__ , scheduler=a__ , safety_checker=a__ , feature_extractor=a__ , )
def __A ( self , a__ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a__ )
def __A ( self ):
self.enable_attention_slicing(a__ )
def __A ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : int = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 50 , a__ = 7.5 , a__ = None , a__ = 1 , a__ = 0.0 , a__ = None , a__ = None , a__ = "pil" , a__ = True , a__ = None , a__ = 1 , **a__ , ):
_lowerCAmelCase : Any = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
_lowerCAmelCase : Dict = self.segmentation_model(**a__ )
_lowerCAmelCase : Tuple = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
_lowerCAmelCase : Optional[Any] = self.numpy_to_pil(a__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
_lowerCAmelCase : Optional[int] = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=a__ , image=a__ , mask_image=a__ , height=a__ , width=a__ , num_inference_steps=a__ , guidance_scale=a__ , negative_prompt=a__ , num_images_per_prompt=a__ , eta=a__ , generator=a__ , latents=a__ , output_type=a__ , return_dict=a__ , callback=a__ , callback_steps=a__ , )
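# Flow of the pipeline above, in short: CLIPSeg turns the `text` query into a
# soft segmentation mask over the input image, and that mask plus the `prompt`
# are handed to a standard StableDiffusionInpaintPipeline, which repaints only
# the masked region.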
| 663 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n        >>> from diffusers.utils import load_image\n        >>> import torch\n\n        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n        ... )\n        >>> pipe_prior.to("cuda")\n\n        >>> prompt = "A red cartoon frog, 4k"\n        >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n        >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n        ...     "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n        ... )\n        >>> pipe.to("cuda")\n\n        >>> init_image = load_image(\n        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n        ...     "/kandinsky/frog.png"\n        ... )\n\n        >>> image = pipe(\n        ...     image=init_image,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=zero_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ...     strength=0.2,\n        ... ).images\n\n        >>> image[0].save("red_frog.png")\n        ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
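# Worked example: with the default scale_factor=8 a requested 768x768 output
# maps to a 96x96 latent grid (768 // 64 * 8 == 96), while a non-multiple such
# as 700 is rounded up, giving (700 // 64 + 1) * 8 == 88.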
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
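# Net effect: a PIL image becomes a 1 x 3 x h x w float tensor scaled to
# [-1, 1], e.g. prepare_image(pil_img, 768, 768).shape == (1, 3, 768, 768).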
class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds,
        image,
        negative_image_embeds,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        strength=0.3,
        num_images_per_prompt=1,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 663 | 1 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird question-answering module with an extra 5-way head predicting the answer category."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        """Cross-entropy over one-hot targets; optionally reduced (e.g. by jnp.mean)."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
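# The three heads are trained jointly: the start/end logits are per-token
# classifiers over answer-span positions and the pooled logits feed the 5-way
# category head added in the module above; averaging the three mean
# cross-entropies makes each head contribute equally to the loss.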
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap ,axis_name="""batch""" )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap ,axis_name="""batch""" )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def __A ( self , a__ , a__ , a__ , a__=None ):
_lowerCAmelCase : str = model.params
_lowerCAmelCase : str = TrainState.create(
apply_fn=model.__call__ , params=a__ , tx=a__ , loss_fn=a__ , )
if ckpt_dir is not None:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = restore_checkpoint(a__ , a__ )
_lowerCAmelCase : Dict = {
"""lr""": args.lr,
"""init_lr""": args.init_lr,
"""warmup_steps""": args.warmup_steps,
"""num_train_steps""": num_train_steps,
"""weight_decay""": args.weight_decay,
}
_lowerCAmelCase , _lowerCAmelCase : str = build_tx(**a__ )
_lowerCAmelCase : str = train_state.TrainState(
step=a__ , apply_fn=model.__call__ , params=a__ , tx=a__ , opt_state=a__ , )
_lowerCAmelCase : int = args
_lowerCAmelCase : str = data_collator
_lowerCAmelCase : Optional[Any] = lr
_lowerCAmelCase : int = params
_lowerCAmelCase : Any = jax_utils.replicate(a__ )
return state
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = self.args
_lowerCAmelCase : Union[str, Any] = len(a__ ) // args.batch_size
_lowerCAmelCase : Optional[int] = jax.random.PRNGKey(0 )
_lowerCAmelCase : Any = jax.random.split(a__ , jax.device_count() )
for epoch in range(args.max_epochs ):
_lowerCAmelCase : str = jnp.array(0 , dtype=jnp.floataa )
_lowerCAmelCase : Optional[Any] = get_batched_dataset(a__ , args.batch_size , seed=a__ )
_lowerCAmelCase : Optional[Any] = 0
for batch in tqdm(a__ , total=a__ , desc=F"Running EPOCH-{epoch}" ):
_lowerCAmelCase : Optional[int] = self.data_collator(a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.train_step_fn(a__ , a__ , **a__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
if i % args.logging_steps == 0:
_lowerCAmelCase : int = jax_utils.unreplicate(state.step )
_lowerCAmelCase : int = running_loss.item() / i
_lowerCAmelCase : Optional[Any] = self.scheduler_fn(state_step - 1 )
_lowerCAmelCase : str = self.evaluate(a__ , a__ )
_lowerCAmelCase : List[Any] = {
"""step""": state_step.item(),
"""eval_loss""": eval_loss.item(),
"""tr_loss""": tr_loss,
"""lr""": lr.item(),
}
tqdm.write(str(a__ ) )
self.logger.log(a__ , commit=a__ )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}" , state=a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = get_batched_dataset(a__ , self.args.batch_size )
_lowerCAmelCase : List[Any] = len(a__ ) // self.args.batch_size
_lowerCAmelCase : str = jnp.array(0 , dtype=jnp.floataa )
_lowerCAmelCase : List[str] = 0
for batch in tqdm(a__ , total=a__ , desc="""Evaluating ... """ ):
_lowerCAmelCase : Any = self.data_collator(a__ )
_lowerCAmelCase : Tuple = self.val_step_fn(a__ , **a__ )
running_loss += jax_utils.unreplicate(metrics["""loss"""] )
i += 1
return running_loss / i
def __A ( self , a__ , a__ ):
_lowerCAmelCase : int = jax_utils.unreplicate(a__ )
print(F"SAVING CHECKPOINT IN {save_dir}" , end=""" ... """ )
self.model_save_fn(a__ , params=state.params )
with open(os.path.join(a__ , """opt_state.msgpack""" ) , """wb""" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(a__ , """args.joblib""" ) )
joblib.dump(self.data_collator , os.path.join(a__ , """data_collator.joblib""" ) )
with open(os.path.join(a__ , """training_state.json""" ) , """w""" ) as f:
json.dump({"""step""": state.step.item()} , a__ )
print("""DONE""" )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : List[Any] ,_lowerCamelCase : Any ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[int] ) -> str:
def weight_decay_mask(_lowerCamelCase : List[Any] ):
_lowerCAmelCase : List[str] = traverse_util.flatten_dict(_lowerCamelCase )
_lowerCAmelCase : int = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
return traverse_util.unflatten_dict(_lowerCamelCase )
_lowerCAmelCase : List[Any] = scheduler_fn(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = optax.adamw(learning_rate=_lowerCamelCase ,weight_decay=_lowerCamelCase ,mask=_lowerCamelCase )
return tx, lr
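# A runnable sketch (assuming only that optax is installed) of how the warmup and
# decay schedules above compose; the step counts and learning rates are made up.
#
#   warmup = optax.linear_schedule(init_value=0.0, end_value=3e-4, transition_steps=100)
#   decay = optax.linear_schedule(init_value=3e-4, end_value=1e-7, transition_steps=900)
#   lr_fn = optax.join_schedules(schedules=[warmup, decay], boundaries=[100])
#   lr_fn(0), lr_fn(100), lr_fn(1000)   # ramps up to 3e-4 at step 100, then decays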
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 663 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 663 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)
        reconstructed_text = tokenizer.decode(input_ids)
        self.assertEqual(reconstructed_text, normalized_text)
| 663 | 1 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'ctrl': 256,
}
CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word, represented as a
    # tuple of symbols (symbols are variable-length strings).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
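# Worked example (illustrative only): for the symbol tuple ("l", "o", "w"),
# get_pairs returns {("l", "o"), ("o", "w")} - the adjacent pairs that the BPE
# loop below ranks against self.bpe_ranks to pick the next merge.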
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked (earliest learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
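# Illustrative round trip (not part of the original file): BPE pieces carry an
# "@@ " continuation marker, and convert_tokens_to_string above strips it back out:
#
#   " ".join(["lo@@", "wer", "new@@", "er"]).replace("@@ ", "").strip()
#   # -> "lower newer"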
| 663 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"
        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")
        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]
        pad_token_id = tokenizer.pad_token_id
        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")
        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])
        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])
        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])
        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)
        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]
        bos_token_id = tokenizer.bos_token_id
        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)
        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))
        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)
        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b:\n result = a\nelse:\n result = b"
        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
    def test_padding_different_model_input_name(self):
        pass
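# Rough sketch (an assumption about the behaviour exercised by test_truncation
# above, not a statement of the transformers implementation): decoding with
# truncate_before_pattern cuts the decoded text at the first regex match.
#
#   completion = "result = a + b\n\n\n# next unrelated prompt"
#   match = re.search("\n\n\n", completion)
#   truncated = completion[: match.start()] if match else completion  # "result = a + b"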
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
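# Note (illustrative): the _LazyModule registered above defers the heavy torch/flax
# imports until an attribute such as SpeechEncoderDecoderModel is first accessed,
# which keeps importing the package cheap when optional backends are missing.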
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
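    # Worked example (illustrative, not in the original file): solving for the
    # force between two 1 C charges 1 m apart recovers Coulomb's constant itself.
    print(coulombs_law(force=0, charge1=1, charge2=1, distance=1))  # {'force': 8988000000.0}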
| 663 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
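    # Quick sanity check (illustrative): 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14
    # are the two smallest perfect numbers, while 27 is not perfect.
    assert perfect(6) and perfect(28) and not perfect(27)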
| 663 | 1 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
    def test_add_faiss_index(self):
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
_lowerCAmelCase : Dict = dset.map(
lambda a__ , a__ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=a__ , keep_in_memory=a__ )
_lowerCAmelCase : List[Any] = dset.add_faiss_index("""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
_lowerCAmelCase , _lowerCAmelCase : int = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
    def test_add_faiss_index_from_external_arrays(self):
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
    def test_serialization(self):
import faiss
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a__ ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
    def test_drop_index(self):
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(a__ , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
    def test_add_elasticsearch_index(self):
from elasticsearch import Elasticsearch
_lowerCAmelCase : Dataset = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
_lowerCAmelCase : Union[str, Any] = {"""acknowledged""": True}
            mocked_bulk.return_value = [(True, None)] * 30
_lowerCAmelCase : Union[str, Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
_lowerCAmelCase : List[Any] = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=a__ )
_lowerCAmelCase , _lowerCAmelCase : List[str] = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
import faiss
_lowerCAmelCase : Dict = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_lowerCAmelCase : List[Any] = np.zeros(5 , dtype=np.floataa )
_lowerCAmelCase : Any = 1
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = index.search(a__ )
self.assertRaises(a__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_lowerCAmelCase : List[Any] = np.eye(5 , dtype=np.floataa )[::-1]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = index.search_batch(a__ )
self.assertRaises(a__ , index.search_batch , queries[0] )
_lowerCAmelCase : int = [scores[0] for scores in total_scores]
_lowerCAmelCase : int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , a__ )
    def test_factory(self):
import faiss
_lowerCAmelCase : Tuple = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_lowerCAmelCase : Dict = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(a__ ):
_lowerCAmelCase : int = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
    def test_custom(self):
import faiss
_lowerCAmelCase : Union[str, Any] = faiss.IndexFlat(5 )
_lowerCAmelCase : int = FaissIndex(custom_index=a__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization(self):
import faiss
_lowerCAmelCase : List[str] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a__ ) as tmp_file:
index.save(tmp_file.name )
_lowerCAmelCase : Tuple = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_lowerCAmelCase : Optional[Any] = np.zeros(5 , dtype=np.floataa )
_lowerCAmelCase : int = 1
_lowerCAmelCase , _lowerCAmelCase : List[str] = index.search(a__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
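# Context note (an assumption about the fixture, which is defined elsewhere in the
# test suite): `mockfs` exposes an fsspec filesystem registered under the "mock://"
# protocol, so save/load above exercise the storage_options path without real I/O.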
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
_lowerCAmelCase : Optional[Any] = Elasticsearch()
_lowerCAmelCase : List[str] = {"""acknowledged""": True}
_lowerCAmelCase : str = ElasticSearchIndex(es_client=a__ )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
_lowerCAmelCase : List[str] = """foo"""
_lowerCAmelCase : str = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = index.search(a__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_lowerCAmelCase : Optional[Any] = """foo"""
_lowerCAmelCase : Union[str, Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
_lowerCAmelCase , _lowerCAmelCase : Dict = index.search(a__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_lowerCAmelCase : Tuple = ["""foo""", """bar""", """foobar"""]
_lowerCAmelCase : Optional[Any] = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
_lowerCAmelCase , _lowerCAmelCase : Tuple = index.search_batch(a__ )
_lowerCAmelCase : Any = [scores[0] for scores in total_scores]
_lowerCAmelCase : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a__ ) , 0 )
self.assertListEqual([1, 1, 1] , a__ )
# batched queries with timeout
_lowerCAmelCase : Union[str, Any] = ["""foo""", """bar""", """foobar"""]
_lowerCAmelCase : int = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = index.search_batch(a__ , request_timeout=30 )
_lowerCAmelCase : Union[str, Any] = [scores[0] for scores in total_scores]
_lowerCAmelCase : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a__ ) , 0 )
self.assertListEqual([1, 1, 1] , a__ )
| 663 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
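    # Worked example (illustrative): for the fixed tree built by make_tree(),
    # the zig-zag traversal alternates direction level by level.
    assert zigzag(make_tree()) == [[1], [3, 2], [4, 5]]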
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 663 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : int = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
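# Illustrative note (not part of the original file): attribute_map plus the two
# properties above let generic code read the common names even though Pegasus
# stores them under model-specific ones, e.g. with the defaults:
#
#   config = PegasusConfig()
#   config.hidden_size == config.d_model == 1024
#   config.num_attention_heads == config.encoder_attention_heads == 16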
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
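# Minimal usage sketch, assuming a TensorFlow 1.x environment (tf.Session and
# tf.placeholder are gone from the default TF 2.x namespace); data is made up.
#
#   vectors = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.0], [7.9, 8.2]]
#   centroids, assignments = tf_k_means_cluster(vectors, noofclusters=2)
#   centroids    # two cluster centres, near (1.1, 0.9) and (7.95, 8.1)
#   assignments  # cluster index chosen for each input vector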
| 663 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 663 |
"""simple docstring"""
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 1 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
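# The tester below builds a tiny TransfoXLConfig and checks the shapes of the
# hidden states / logits and of the `mems` (memory states carried across
# segments) returned by each task head.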
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.vocab_size - 1, init_range=self.init_range, num_labels=self.num_labels, )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
# fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 663 | 1 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
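# Each test assembles a small Python program out of `load` / `run` / `mock`
# snippets and executes it in a subprocess, so that TRANSFORMERS_OFFLINE and
# the socket monkey-patch take effect before `transformers` is imported.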
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : str = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : int = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Optional[Any] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : Dict = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
_lowerCAmelCase : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
_lowerCAmelCase : Dict = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(a__ )
BertModel.from_pretrained(a__ )
BertTokenizer.from_pretrained(a__ )
pipeline(task="""fill-mask""" , model=a__ )
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_lowerCAmelCase : str = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
_lowerCAmelCase : int = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
_lowerCAmelCase : Union[str, Any] = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Any = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
_lowerCAmelCase : Optional[Any] = """
from transformers import pipeline
"""
_lowerCAmelCase : List[Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
_lowerCAmelCase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
    def test_offline_model_dynamic_model(self):
_lowerCAmelCase : List[str] = """
from transformers import AutoModel
"""
_lowerCAmelCase : Optional[Any] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCAmelCase : Dict = """1"""
_lowerCAmelCase : List[Any] = subprocess.run(a__ , env=a__ , check=a__ , capture_output=a__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 663 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
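# Lazy import structure: submodule contents are registered by name here and
# only imported on first attribute access through _LazyModule (except under
# TYPE_CHECKING, where the real imports run for static analysis).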
_import_structure = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
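# Minimal usage sketch (with hypothetical `encoder_cfg` / `decoder_cfg`
# PretrainedConfig instances, not taken from this file):
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention
#   round_trip = config.to_dict()  # nested encoder/decoder dicts plus model_type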
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
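    # A deliberately tiny Transformer2DModel/AutoencoderKL pair keeps these
    # fast tests cheap while still exercising the full DiT pipeline graph.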
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 663 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 663 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
_a : Tuple = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
SPIECE_UNDERLINE = '▁'
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 663 |
"""simple docstring"""
import argparse
import json
import subprocess
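# Query the GitHub Actions API for the status of selected self-hosted runners,
# persist any offline ones to offline_runners.txt (for the Slack report), and
# fail loudly if the list is non-empty.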
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 1 |
"""simple docstring"""
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
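# Breadth-first search visits vertices level by level, so in an unweighted
# graph the first path that reaches the goal is guaranteed to be a shortest one.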
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 663 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 1 |
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
    args = parser.parse_args()
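    # Build the image-variation pipeline by reusing every sub-module of the
    # text-to-image Karlo pipeline and adding a CLIP image encoder + processor.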
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 663 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
| 663 | 1 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
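# Minimal usage sketch (hypothetical `trainer`, not defined in this file):
#   backend_name = default_hp_search_backend()
#   backend = ALL_HYPERPARAMETER_SEARCH_BACKENDS[HPSearchBackend(backend_name)]()
#   backend.ensure_available()
#   best_run = backend.run(trainer, n_trials=20, direction="minimize")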
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
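# Gaussian elimination with partial pivoting: forward elimination reduces the
# augmented matrix [A | b] to row-echelon form, back substitution zeroes the
# entries above each pivot, and each unknown is augmented[row][size] / augmented[row][row].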
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
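# Fit the unique polynomial of degree len(y_list) - 1 through the points
# (1, y_1), ..., (k, y_k) by solving the corresponding Vandermonde system.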
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)

    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size) )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 663 |
"""simple docstring"""
from PIL import Image
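# Linear contrast adjustment: for level in [-255, 255] the factor
# (259 * (level + 255)) / (255 * (259 - level)) stretches every channel value
# around the midpoint 128 (factor > 1 increases contrast, factor < 1 lowers it).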
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 1 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
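    # Boruvka's MST algorithm: while more than one component remains, pick the
    # cheapest edge leaving each component and merge the two components it
    # joins. `distinct_weight` above makes the cheapest edge unambiguous.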
    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 663 |
"""simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass
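# FixedPriorityQueue keeps one FIFO list per priority level (0 is served
# first); ElementPriorityQueue instead treats the smallest stored value as the
# highest priority.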
class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
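    # Copy the embeddings and every other teacher layer (0, 2, 4, 7, 9, 11)
    # into a 6-layer DistilBERT-style student state dict, renaming the
    # parameters along the way.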
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 663 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with an audio sample loaded from a dummy ASR dataset
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
_a : Optional[Any] = re.compile(r'\s+')
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Dict:
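# fingerprint the whitespace-stripped content so exact duplicates can be filtered out later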
return {"hash": hashlib.mda(re.sub(_lowerCamelCase ,"""""" ,example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> Optional[int]:
_lowerCAmelCase : Optional[int] = [len(line ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(_lowerCamelCase ), "line_max": max(_lowerCamelCase )}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Any:
_lowerCAmelCase : Tuple = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : Any ) -> str:
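# keep only the first occurrence of each hash; removing it from the set rejects later copies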
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Any=5 ) -> int:
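# scan only the first few lines for auto-generation markers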
_lowerCAmelCase : str = ["""auto-generated""", """autogenerated""", """automatically generated"""]
_lowerCAmelCase : List[str] = example["""content"""].splitlines()
for _, line in zip(range(_lowerCamelCase ) ,_lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ,_lowerCamelCase : Any=5 ,_lowerCamelCase : Any=0.05 ) -> Dict:
_lowerCAmelCase : Tuple = ["""unit tests""", """test file""", """configuration file"""]
_lowerCAmelCase : Any = example["""content"""].splitlines()
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = 0
# first test
for _, line in zip(range(_lowerCamelCase ) ,_lowerCamelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_lowerCAmelCase : Tuple = example["""content"""].count("""\n""" )
_lowerCAmelCase : Optional[Any] = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> int:
_lowerCAmelCase : Any = ["""def """, """class """, """for """, """while """]
_lowerCAmelCase : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : Union[str, Any]=4 ) -> str:
_lowerCAmelCase : Any = example["""content"""].splitlines()
_lowerCAmelCase : Tuple = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
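# characters per token; a low ratio flags content the tokenizer handles poorly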
_lowerCAmelCase : Optional[int] = tokenizer(example["""content"""] ,truncation=False )["""input_ids"""]
_lowerCAmelCase : int = len(example["""content"""] ) / len(_lowerCamelCase )
return {"ratio": ratio}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Dict:
_lowerCAmelCase : List[Any] = {}
results.update(get_hash(_lowerCamelCase ) )
results.update(line_stats(_lowerCamelCase ) )
results.update(alpha_stats(_lowerCamelCase ) )
results.update(char_token_ratio(_lowerCamelCase ) )
results.update(is_autogenerated(_lowerCamelCase ) )
results.update(is_config_or_test(_lowerCamelCase ) )
results.update(has_no_keywords(_lowerCamelCase ) )
results.update(has_few_assignments(_lowerCamelCase ) )
return results
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ,_lowerCamelCase : int ) -> Tuple:
if not check_uniques(_lowerCamelCase ,_lowerCamelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Union[str, Any]:
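# gzip the shard and remove the uncompressed original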
with open(_lowerCamelCase ,"""rb""" ) as f_in:
with gzip.open(str(_lowerCamelCase ) + """.gz""" ,"""wb""" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(_lowerCamelCase ,_lowerCamelCase )
os.unlink(_lowerCamelCase )
# Settings
_a : Dict = HfArgumentParser(PreprocessingArguments)
_a : Dict = parser.parse_args()
if args.num_workers is None:
_a : str = multiprocessing.cpu_count()
_a : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
_a : Dict = time.time()
_a : int = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
_a : List[Any] = time.time()
_a : Any = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
_a : Optional[int] = set(ds.unique('hash'))
_a : Dict = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
_a : Dict = time.time()
_a : Dict = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
_a : Optional[int] = time.time()
_a , _a : Union[str, Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
_a : List[str] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
_a : Dict = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
_a : Any = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
_a : Union[str, Any] = str(data_dir / F"""file-{file_number+1:012}.json""")
_a : List[Any] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 663 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
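# downscale by the movq scale factor while keeping the result a multiple of it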
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.float32 ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
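# keep only the tail of the schedule; higher strength re-noises more of the input image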
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
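# stack [negative, positive] image embeddings so one forward pass covers both guidance branches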
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 1 |
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_a : Optional[Any] = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_a : Optional[Any] = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_a : Any = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_a : Optional[Any] = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def __A ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"""] , reference_urls=[
"""https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score""",
"""https://en.wikipedia.org/wiki/METEOR""",
] , )
def __A ( self , a__ ):
import nltk
nltk.download("""wordnet""" )
if NLTK_VERSION >= version.Version("""3.6.5""" ):
nltk.download("""punkt""" )
if NLTK_VERSION >= version.Version("""3.6.6""" ):
nltk.download("""omw-1.4""" )
def __A ( self , a__ , a__ , a__=0.9 , a__=3 , a__=0.5 ):
if NLTK_VERSION >= version.Version("""3.6.5""" ):
_lowerCAmelCase : List[str] = [
meteor_score.single_meteor_score(
word_tokenize(a__ ) , word_tokenize(a__ ) , alpha=a__ , beta=a__ , gamma=a__ )
for ref, pred in zip(a__ , a__ )
]
else:
_lowerCAmelCase : Tuple = [
meteor_score.single_meteor_score(a__ , a__ , alpha=a__ , beta=a__ , gamma=a__ )
for ref, pred in zip(a__ , a__ )
]
return {"meteor": np.mean(a__ )}
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A :
@staticmethod
def __A ( *a__ , **a__ ):
pass
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ) -> str:
_lowerCAmelCase : int = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ) -> Dict:
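# summarize a mask as a short content hash plus its shape for compact comparisons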
_lowerCAmelCase : Any = np.array(_lowerCamelCase )
_lowerCAmelCase : Tuple = npimg.shape
return {"hash": hashimage(_lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
_UpperCamelCase : List[str] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
_UpperCamelCase : Tuple = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : int = MaskGenerationPipeline(model=a__ , image_processor=a__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __A ( self , a__ , a__ ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def __A ( self ):
pass
@slow
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[int] = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
_lowerCAmelCase : Optional[Any] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
_lowerCAmelCase : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def __A ( self ):
_lowerCAmelCase : List[str] = """facebook/sam-vit-huge"""
_lowerCAmelCase : Dict = pipeline("""mask-generation""" , model=a__ )
_lowerCAmelCase : Any = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
_lowerCAmelCase : Any = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(a__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
] , )
| 663 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
_a : Optional[Any] = logging.get_logger(__name__)
_a : Tuple = 'Hello world! cécé herlolip'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ,_lowerCamelCase : bool ) -> Any:
_lowerCAmelCase : Tuple = FairseqRobertaModel.from_pretrained(_lowerCamelCase )
roberta.eval() # disable dropout
_lowerCAmelCase : Tuple = roberta.model.encoder.sentence_encoder
_lowerCAmelCase : List[str] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings ,hidden_size=roberta.cfg.model.encoder_embed_dim ,num_hidden_layers=roberta.cfg.model.encoder_layers ,num_attention_heads=roberta.cfg.model.encoder_attention_heads ,intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim ,max_position_embeddings=514 ,type_vocab_size=1 ,layer_norm_eps=1e-5 ,)
if classification_head:
_lowerCAmelCase : Tuple = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our RoBERTa config:""" ,_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = XLMRobertaXLForSequenceClassification(_lowerCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_lowerCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
_lowerCAmelCase : List[str] = roberta_sent_encoder.embed_tokens.weight
_lowerCAmelCase : Optional[int] = roberta_sent_encoder.embed_positions.weight
_lowerCAmelCase : str = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_lowerCAmelCase : List[Any] = roberta_sent_encoder.layer_norm.weight
_lowerCAmelCase : Union[str, Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
_lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
_lowerCAmelCase : RobertaAttention = layer.attention
_lowerCAmelCase : str = roberta_layer.self_attn_layer_norm.weight
_lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
_lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_lowerCAmelCase : List[str] = roberta_layer.self_attn.q_proj.weight
_lowerCAmelCase : List[str] = roberta_layer.self_attn.q_proj.bias
_lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.k_proj.weight
_lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.k_proj.bias
_lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.weight
_lowerCAmelCase : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
_lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_lowerCAmelCase : Any = roberta_layer.self_attn.out_proj.weight
_lowerCAmelCase : str = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_lowerCAmelCase : Any = roberta_layer.final_layer_norm.weight
_lowerCAmelCase : str = roberta_layer.final_layer_norm.bias
# intermediate
_lowerCAmelCase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
_lowerCAmelCase : Optional[int] = roberta_layer.fc1.weight
_lowerCAmelCase : Any = roberta_layer.fc1.bias
# output
_lowerCAmelCase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
_lowerCAmelCase : Optional[Any] = roberta_layer.fc2.weight
_lowerCAmelCase : Optional[Any] = roberta_layer.fc2.bias
# end of layer
if classification_head:
_lowerCAmelCase : Dict = roberta.model.classification_heads["""mnli"""].dense.weight
_lowerCAmelCase : List[Any] = roberta.model.classification_heads["""mnli"""].dense.bias
_lowerCAmelCase : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.weight
_lowerCAmelCase : Optional[int] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
_lowerCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.dense.weight
_lowerCAmelCase : Any = roberta.model.encoder.lm_head.dense.bias
_lowerCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.weight
_lowerCAmelCase : Dict = roberta.model.encoder.lm_head.layer_norm.bias
_lowerCAmelCase : Any = roberta.model.encoder.lm_head.weight
_lowerCAmelCase : Union[str, Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_lowerCAmelCase : torch.Tensor = roberta.encode(_lowerCamelCase ).unsqueeze(0 ) # batch of size 1
_lowerCAmelCase : Union[str, Any] = model(_lowerCamelCase )[0]
if classification_head:
_lowerCAmelCase : List[Any] = roberta.model.classification_heads["""mnli"""](roberta.extract_features(_lowerCamelCase ) )
else:
_lowerCAmelCase : Tuple = roberta.model(_lowerCamelCase )[0]
print(our_output.shape ,their_output.shape )
_lowerCAmelCase : Dict = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
_lowerCAmelCase : List[Any] = torch.allclose(_lowerCamelCase ,_lowerCamelCase ,atol=1e-3 )
print("""Do both models output the same tensors?""" ,"""🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
pathlib.Path(_lowerCamelCase ).mkdir(parents=_lowerCamelCase ,exist_ok=_lowerCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
_a : Any = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 663 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> list[int]:
_lowerCAmelCase : Any = [True] * limit
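# 0 and 1 are not prime; 2 is the only even prime, so only odd candidates are sieved below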
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Any = False
_lowerCAmelCase : List[Any] = True
for i in range(3 ,int(limit**0.5 + 1 ) ,2 ):
_lowerCAmelCase : str = i * 2
while index < limit:
_lowerCAmelCase : Any = False
_lowerCAmelCase : str = index + i
_lowerCAmelCase : Tuple = [2]
for i in range(3 ,_lowerCamelCase ,2 ):
if is_prime[i]:
primes.append(_lowerCamelCase )
return primes
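# Quick sanity check (illustrative comment, not part of the original file):
# prime_sieve(20) -> [2, 3, 5, 7, 11, 13, 17, 19]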
def solution ( ceiling : int = 1000000 ) -> int:
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length ,len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
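# Worked example (illustrative, Project Euler 50): solution(100) returns 41,
# since 2 + 3 + 5 + 7 + 11 + 13 = 41 is the longest run of consecutive primes
# summing to a prime below 100.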
if __name__ == "__main__":
print(F"""{solution() = }""")
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Dict = logging.get_logger(__name__)
_a : int = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __A ( PretrainedConfig ):
    model_type = "sew-d"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__=2 , a__=512 , a__=256 , a__=True , a__=True , a__=("p2c", "c2p") , a__="layer_norm" , a__="gelu_python" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.1 , a__=0.0_2 , a__=1e-7 , a__=1e-5 , a__="group" , a__="gelu" , a__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , a__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a__=False , a__=128 , a__=16 , a__=True , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__="mean" , a__=False , a__=False , a__=256 , a__=0 , a__=1 , a__=2 , **a__ , ):
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[Any] = feat_extract_norm
_lowerCAmelCase : Any = feat_extract_activation
_lowerCAmelCase : Optional[int] = list(a__ )
_lowerCAmelCase : Dict = list(a__ )
_lowerCAmelCase : Tuple = list(a__ )
_lowerCAmelCase : int = conv_bias
_lowerCAmelCase : Any = num_conv_pos_embeddings
_lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCAmelCase : int = len(self.conv_dim )
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : int = squeeze_factor
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : Optional[Any] = position_buckets
_lowerCAmelCase : List[str] = share_att_key
_lowerCAmelCase : Optional[Any] = relative_attention
_lowerCAmelCase : str = norm_rel_ebd
_lowerCAmelCase : Any = list(a__ )
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : List[str] = hidden_dropout
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : Union[str, Any] = activation_dropout
_lowerCAmelCase : Dict = feat_proj_dropout
_lowerCAmelCase : int = final_dropout
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : List[Any] = feature_layer_norm_eps
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : List[str] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
F"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : List[Any] = apply_spec_augment
_lowerCAmelCase : List[Any] = mask_time_prob
_lowerCAmelCase : Any = mask_time_length
_lowerCAmelCase : int = mask_time_min_masks
_lowerCAmelCase : Optional[int] = mask_feature_prob
_lowerCAmelCase : List[str] = mask_feature_length
_lowerCAmelCase : Any = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Tuple = ctc_loss_reduction
_lowerCAmelCase : List[str] = ctc_zero_infinity
# sequence classification
_lowerCAmelCase : Union[str, Any] = use_weighted_layer_sum
_lowerCAmelCase : Optional[int] = classifier_proj_size
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
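    # Illustrative note (not in the original): with the default conv_stride of
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) this product is 5 * 2**6 = 320,
    # i.e. one frame of encoder output per 320 raw audio samples.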
| 663 |
"""simple docstring"""
def perfect ( number : int ) -> bool:
    return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
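# Illustrative: perfect(6) is True since 1 + 2 + 3 == 6, while perfect(12) is False
# (1 + 2 + 3 + 4 + 6 == 16 != 12).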
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 663 | 1 |
"""simple docstring"""
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
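# Illustrative sketch (not in the original) of how the variations above expand into
# a cartesian product; main() below does the same, with str.strip applied to each part:
#
#   >>> import itertools
#   >>> [" ".join(v).strip() for v in itertools.product(["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"])]
#   ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']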
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
_a : Any = float('nan')
class __A :
def __init__( self , a__ ):
        self.stdout = sys.stdout
        self.file = open(a__ , """a""" )
def __getattr__( self , a__ ):
return getattr(self.stdout , a__ )
def __A ( self , a__ ):
self.stdout.write(a__ )
# strip tqdm codes
self.file.write(re.sub(r"""^.*\r""" , """""" , a__ , 0 , re.M ) )
def get_original_command ( max_width=80 ,full_python_path=False ) -> Optional[Any]:
    cmd = []
    # deal with critical env vars
    env_keys = ["""CUDA_VISIBLE_DEVICES"""]
    for key in env_keys:
        val = os.environ.get(key ,None )
        if val is not None:
            cmd.append(f"{key}={val}" )
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("""/""" )[-1]
    cmd.append(python )
    # now the normal args
    cmd += list(map(shlex.quote ,sys.argv ) )
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = """"""
    while len(cmd ) > 0:
        current_line += f"{cmd.pop(0 )} "
        if len(cmd ) == 0 or len(current_line ) + len(cmd[0] ) + 1 > max_width - 1:
            lines.append(current_line )
            current_line = """"""
    return "\\\n".join(lines )
def get_base_command ( args ,output_dir ) -> List[Any]:
    # unwrap multi-line input
    args.base_cmd = re.sub(r"""[\\\n]+""" ,""" """ ,args.base_cmd )
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"""--output_dir\s+[^\s]+""" ,"""""" ,args.base_cmd )
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"""--overwrite_output_dir\s+""" ,"""""" ,args.base_cmd )
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd )
def process_run_single ( id ,cmd ,variation ,output_dir ,target_metric_key ,metric_keys ,verbose ) -> Union[str, Any]:
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 ,100 ) for k in metric_keys} ,**{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} ,)

    result = subprocess.run(cmd ,capture_output=True ,text=True )
    if verbose:
        print("""STDOUT""" ,result.stdout )
        print("""STDERR""" ,result.stderr )
    # save the streams
    prefix = variation.replace(""" """ ,"""-""" )
    with open(Path(output_dir ) / f"log.{prefix}.stdout.txt" ,"""w""" ) as f:
        f.write(result.stdout )
    with open(Path(output_dir ) / f"log.{prefix}.stderr.txt" ,"""w""" ) as f:
        f.write(result.stderr )
    if result.returncode != 0:
        if verbose:
            print("""failed""" )
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json" ,"""r""" ,encoding="""utf-8""" ) as f:
        metrics = json.load(f )
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run ( id ,cmd ,variation_key ,variation ,longest_variation_len ,target_metric_key ,report_metric_keys ,repeat_times ,output_dir ,verbose ,) -> Optional[Any]:
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(repeat_times ) ,desc=preamble ,leave=False ):
        single_run_metrics = process_run_single(
            id ,cmd ,variation ,output_dir ,target_metric_key ,metric_keys ,verbose )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result ):
            metrics.append(single_run_metrics )
            results.append(result )
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics ) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key] ,2 )
        results_str = f"{outcome} {mean_target}"
        if len(results ) > 1:
            results_str += f" {tuple(round(x ,2 ) for x in results )}"
        print(results_str )
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome )
        return {variation_key: variation, target_metric_key: nan}
def get_versions ( ) -> str:
    properties = torch.cuda.get_device_properties(torch.device("""cuda""" ) )
return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results ( results ,target_metric_key ,report_metric_keys ,base_variation ,output_dir ) -> int:
    df = pd.DataFrame(results )
    variation_key = """variation"""
    diff_key = """diff_%"""
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value ):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value ):
        df[diff_key] = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 ,axis="""columns""" ,)
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols ,axis="""columns""" ) # reorder cols
    # capitalize
    df = df.rename(str.capitalize ,axis="""columns""" )
    # make the cols as narrow as possible
    df_github = df.rename(lambda c : c.replace("""_""" ,"""<br>""" ) ,axis="""columns""" )
    df_console = df.rename(lambda c : c.replace("""_""" ,"""\n""" ) ,axis="""columns""" )
    report = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False ,floatfmt=""".2f""" )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False ,floatfmt=""".2f""" )]
    print("""\n\n""".join(report ) )
def main ( ) -> List[Any]:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--base-cmd""" ,default=None ,type=str ,required=True ,help="""Base cmd""" ,)
    parser.add_argument(
        """--variations""" ,default=None ,type=str ,nargs="""+""" ,required=True ,help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" ,)
    parser.add_argument(
        """--base-variation""" ,default=None ,type=str ,help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" ,)
    parser.add_argument(
        """--target-metric-key""" ,default=None ,type=str ,required=True ,help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" ,)
    parser.add_argument(
        """--report-metric-keys""" ,default="""""" ,type=str ,help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples""" ,)
    parser.add_argument(
        """--repeat-times""" ,default=1 ,type=int ,help="""How many times to re-run each variation - an average will be reported""" ,)
    parser.add_argument(
        """--output_dir""" ,default="""output_benchmark""" ,type=str ,help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" ,)
    parser.add_argument(
        """--verbose""" ,default=False ,action="""store_true""" ,help="""Whether to show the outputs of each run or just the benchmark progress""" ,)
    args = parser.parse_args()
    output_dir = args.output_dir
    Path(output_dir ).mkdir(exist_ok=True )
    base_cmd = get_base_command(args ,output_dir )
    # split each dimension into its --foo variations
    dims = [list(map(str.strip ,re.split(r"""\|""" ,x ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip ,map(""" """.join ,itertools.product(*dims ) ) ) )
    longest_variation_len = max(len(x ) for x in variations )
    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" )
    print(f"and this script's output is also piped into {report_fn}" )
    sys.stdout = Tee(report_fn )
    print(f"\n*** Running {len(variations )} benchmarks:" )
    print(f"Base command: {' '.join(base_cmd )}" )
    variation_key = """variation"""
    results = []
    for id, variation in enumerate(tqdm(variations ,desc="""Total completion: """ ,leave=False ) ):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 ,cmd ,variation_key ,variation ,longest_variation_len ,args.target_metric_key ,report_metric_keys ,args.repeat_times ,output_dir ,args.verbose ,) )
    process_results(results ,args.target_metric_key ,report_metric_keys ,args.base_variation ,output_dir )
if __name__ == "__main__":
main()
| 663 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    data : int
    left : Node | None = None
    right : Node | None = None


def make_tree ( ) -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder ( root : Node | None ) -> list[int]:
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder ( root : Node | None ) -> list[int]:
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder ( root : Node | None ) -> list[int]:
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height ( root : Node | None ) -> int:
return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def level_order ( root : Node | None ) -> Sequence[Node | None]:
    output : list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def get_nodes_from_left_to_right ( root : Node | None ,level : int ) -> Sequence[Node | None]:
    output : list[Any] = []
    def populate_output(root : Node | None ,level : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left ,level - 1 )
populate_output(root.right ,level - 1 )
    populate_output(root ,level )
return output
def get_nodes_from_right_to_left ( root : Node | None ,level : int ) -> Sequence[Node | None]:
    output : list[Any] = []
    def populate_output(root : Node | None ,level : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right ,level - 1 )
populate_output(root.left ,level - 1 )
    populate_output(root ,level )
return output
def zigzag ( root : Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output : list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
for h in range(1 ,height_tree + 1 ):
if not flag:
            output.append(get_nodes_from_left_to_right(root ,h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root ,h ) )
            flag = 0
return output
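# Illustrative result on the tree built by make_tree() above:
# zigzag(make_tree()) -> [[1], [3, 2], [4, 5]]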
def main ( ) -> None: # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root )}" )
    print(f"Pre-order Traversal: {preorder(root )}" )
    print(f"Post-order Traversal: {postorder(root )}" ,"""\n""" )
    print(f"Height of Tree: {height(root )}" ,"""\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ) ,"""\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 ,height(root ) + 1 ):
        print(f"Level {level}:" ,get_nodes_from_left_to_right(root ,level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a : Optional[Any] = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ['MobileViTFeatureExtractor']
_a : Optional[Any] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
_a : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
    def test_accelerated_optimizer_pickling( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 1 |
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
    def get_config( self ):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        config , input_ids , input_mask , token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , input_mask , token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallelism = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = GPTNeoXJapaneseModelTester(self )
        self.config_tester = ConfigTester(self , config_class=GPTNeoXJapaneseConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
    def test_model_as_decoder( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
    def test_decoder_model_past_large_inputs( self ):
        config , input_ids , input_mask , token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
    def test_model_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
@slow
    def test_generation( self ):
        model_id = """abeja/gpt-neox-japanese-2.7b"""
        prompts = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
        EXPECTED_OUTPUTS = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id )
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id )
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids
            generated_ids = model.generate(input_ids , max_length=50 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( vectors : Union[str, Any] ,noofclusters : Tuple ) -> Dict:
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("""int32""" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment ,assignment_value ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
                distances = [
                    sess.run(euclid_dist ,feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op ,feed_dict={mean_input: array(assigned_vects )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
return centroids, assignments
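# Hedged usage sketch (not part of the original script): cluster four 2-D points
# into two groups. Assumes a TF 1.x runtime, since the graph above relies on
# tf.Session/tf.placeholder.
if __name__ == "__main__":
    sample_vectors = [
        array([1.0, 1.0]),
        array([1.2, 0.8]),
        array([8.0, 8.0]),
        array([7.8, 8.2]),
    ]
    final_centroids, final_assignments = SCREAMING_SNAKE_CASE(sample_vectors, 2)
    print("Centroids:", final_centroids)
    print("Assignments:", final_assignments)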
| 663 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_a : str = logging.getLogger(__name__)
@dataclass
class __A :
_UpperCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_UpperCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether tp freeze the encoder."} )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class __A :
_UpperCamelCase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
_UpperCamelCase : Optional[str] = field(
default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , )
_UpperCamelCase : Optional[int] = field(
default=1_024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=128 , metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
} , )
_UpperCamelCase : Optional[int] = field(
default=142 , metadata={
"help": (
"The maximum total sequence length for test target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} )
_UpperCamelCase : Optional[int] = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} )
_UpperCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Source language id for translation."} )
_UpperCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Target language id for translation."} )
_UpperCamelCase : Optional[int] = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "# num_beams to use for evaluation."} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , )
def handle_metrics ( split ,metrics ,output_dir ) -> Any:
    logger.info(f"***** {split} metrics *****" )
    for key in sorted(metrics.keys() ):
        logger.info(f"  {key} = {metrics[key]}" )
    save_json(metrics ,os.path.join(output_dir ,f"{split}_results.json" ) )
def main ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
    check_output_dir(training_args )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) ,training_args.fpaa ,)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" ,_lowerCamelCase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_lowerCAmelCase : List[Any] = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
        if getattr(training_args ,p ,None ):
            assert hasattr(config ,p ), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config ,p ,getattr(training_args ,p ) )
    tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path ,from_tf=""".ckpt""" in model_args.model_name_or_path ,config=_lowerCamelCase ,cache_dir=model_args.cache_dir ,)
# use task specific params
    use_task_specific_params(model ,data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams
# set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer ,(MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer ,MBartTokenizer ):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
        freeze_embeds(model )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
    dataset_class = SeqaSeqDataset
# Get datasets
    train_dataset = (
        dataset_class(
            tokenizer ,type_path="""train""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_train ,max_target_length=data_args.max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_train
else None
)
    eval_dataset = (
        dataset_class(
            tokenizer ,type_path="""val""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_val ,max_target_length=data_args.val_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
    test_dataset = (
        dataset_class(
            tokenizer ,type_path="""test""" ,data_dir=data_args.data_dir ,n_obs=data_args.n_test ,max_target_length=data_args.test_max_target_length ,max_source_length=data_args.max_source_length ,prefix=model.config.prefix or """""" ,)
if training_args.do_predict
else None
)
# Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task ,tokenizer ) if training_args.predict_with_generate else None
)
    trainer = SeqaSeqTrainer(
        model=model ,args=training_args ,data_args=data_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,data_collator=SeqaSeqDataCollator(
            tokenizer ,data_args ,model.config.decoder_start_token_id ,training_args.tpu_num_cores ) ,compute_metrics=compute_metrics_fn ,tokenizer=tokenizer ,)
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        metrics = train_result.metrics
        metrics["""train_n_objs"""] = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" ,_lowerCamelCase ,training_args.output_dir )
all_metrics.update(_lowerCamelCase )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir ,"""trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_lowerCAmelCase : Optional[Any] = trainer.evaluate(metric_key_prefix="""val""" )
_lowerCAmelCase : List[Any] = data_args.n_val
_lowerCAmelCase : Tuple = round(metrics["""val_loss"""] ,4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" ,_lowerCamelCase ,training_args.output_dir )
all_metrics.update(_lowerCamelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        test_output = trainer.predict(test_dataset=test_dataset ,metric_key_prefix="""test""" )
        metrics = test_output.metrics
        metrics["""test_n_objs"""] = data_args.n_test
        if trainer.is_world_process_zero():
            metrics["""test_loss"""] = round(metrics["""test_loss"""] ,4 )
handle_metrics("""test""" ,_lowerCamelCase ,training_args.output_dir )
all_metrics.update(_lowerCamelCase )
if training_args.predict_with_generate:
            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions ,skip_special_tokens=True ,clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip ,test_preds )
                write_txt_file(test_preds ,os.path.join(training_args.output_dir ,"""test_generations.txt""" ) )
if trainer.is_world_process_zero():
        save_json(all_metrics ,os.path.join(training_args.output_dir ,"""all_results.json""" ) )
return all_metrics
def _mp_fn ( index : Dict ) -> Tuple:
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 663 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : List[str] = logging.get_logger(__name__)
_a : Union[str, Any] = {'vocab_file': 'sentencepiece.bpe.model'}
_a : Any = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
_a : Union[str, Any] = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
_a : Union[str, Any] = '▁'
class __A ( PreTrainedTokenizer ):
_UpperCamelCase : List[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Union[str, Any] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_lowerCAmelCase : Optional[Any] = vocab_file
_lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
_lowerCAmelCase : Any = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_lowerCAmelCase : List[str] = len(self.sp_model ) - 1
_lowerCAmelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __A ( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
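    # Resulting BARThez format (RoBERTa-style), as built above: a single
    # sequence becomes `<s> A </s>`, a pair becomes `<s> A </s></s> B </s>`.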
    def __A ( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def __A ( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
@property
def __A ( self ):
return len(self.sp_model )
    def __A ( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __A ( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def __A ( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        return spm_id if spm_id else self.unk_token_id
    def __A ( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index )
    def __A ( self , tokens ):
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def __A ( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , """wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
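# Minimal usage sketch (illustrative only, assuming one of the checkpoints
# mapped above is available locally or on the Hub):
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Un exemple de phrase.").input_ids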
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x , tf.keras.layers.Layer )
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 1 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status ( target_runners ,token ) -> List[Any]:
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        """ https://api.github.com/repos/huggingface/transformers/actions/runners"""
    )
    output = subprocess.run(cmd ,shell=True ,stdout=subprocess.PIPE )
    o = output.stdout.decode("""utf-8""" )
    status = json.loads(o )
    runners = status["""runners"""]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("""offline_runners.txt""" ,"""w""" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = """\n""".join([x["""name"""] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str ( values ) -> Optional[int]:
        return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
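    # Example invocation (hypothetical runner names and script filename):
    #   python get_runners_status.py --target_runners runner-1,runner-2 --token $GITHUB_TOKEN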
| 663 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
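# The supported replacement, per the warning above:
# from diffusers import StableDiffusionInpaintPipeline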
| 663 | 1 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __A :
@property
def __A ( self ):
return self.get_dummy_input()
@property
def __A ( self ):
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
    def __A ( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"""hidden_states""": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["""temb"""] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input["""res_hidden_states_tuple"""] = (randn_tensor(shape , generator=generator_1 , device=device ),)
        if include_encoder_hidden_states:
            dummy_input["""encoder_hidden_states"""] = floats_tensor((batch_size, 32, 32) ).to(torch_device )
        if include_skip_sample:
            dummy_input["""skip_sample"""] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )
        return dummy_input
def __A ( self ):
        init_dict = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
_lowerCAmelCase : Optional[Any] = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
    def __A ( self , expected_slice ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        self.assertEqual(output.shape , self.output_shape )
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def __A ( self ):
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )
        if isinstance(output , tuple ):
            output = output[0]
        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
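    # Note: subclasses of this mixin are expected to define `block_class`,
    # `block_type` ("down", "mid" or "up") and `output_shape`; the two tests
    # above then check the forward output shape/values and that a backward
    # pass runs end to end.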
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _a['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _a, module_spec=__spec__)
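    # With the `_LazyModule` installed in `sys.modules`, the sentencepiece-backed
    # tokenizer above is only imported on first attribute access, so importing
    # the package stays cheap even when the optional dependency is missing.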
| 663 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def __A ( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
_lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=a__ , feature_extractor=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[str] = """A red cat sitting on a park bench"""
_lowerCAmelCase : str = np.random.RandomState(0 )
_lowerCAmelCase : List[Any] = pipe(
prompt=a__ , image=a__ , mask_image=a__ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=15 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
    def __A ( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """class_labels""": [1],
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
| 663 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
_a : Any = random.Random()
def floats_list ( shape ,scale=1.0 ,rng=None ,name=None ):
    """Creates nested lists of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
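# e.g. floats_list((2, 3)) returns two lists of three random floats in [0.0, scale).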
@require_torch
class SpeechTaFeatureExtractionTester ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=400 , a__=2000 , a__=1 , a__=0.0 , a__=16000 , a__=True , a__=80 , a__=16 , a__=64 , a__="hann_window" , a__=80 , a__=7600 , a__=1e-10 , a__=True , ):
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Union[str, Any] = min_seq_length
_lowerCAmelCase : Any = max_seq_length
_lowerCAmelCase : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowerCAmelCase : Optional[Any] = feature_size
_lowerCAmelCase : int = padding_value
_lowerCAmelCase : List[Any] = sampling_rate
_lowerCAmelCase : Tuple = do_normalize
_lowerCAmelCase : Dict = num_mel_bins
_lowerCAmelCase : Dict = hop_length
_lowerCAmelCase : Any = win_length
_lowerCAmelCase : Optional[int] = win_function
_lowerCAmelCase : List[Any] = fmin
_lowerCAmelCase : List[str] = fmax
_lowerCAmelCase : List[str] = mel_floor
_lowerCAmelCase : Any = return_attention_mask
def __A ( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __A ( self , a__=False , a__=False ):
def _flatten(a__ ):
return list(itertools.chain(*a__ ) )
if equal_length:
_lowerCAmelCase : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_lowerCAmelCase : Optional[int] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : List[str] = [np.asarray(a__ ) for x in speech_inputs]
return speech_inputs
def __A ( self , a__=False , a__=False ):
if equal_length:
_lowerCAmelCase : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_lowerCAmelCase : List[str] = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_lowerCAmelCase : Union[str, Any] = [np.asarray(a__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = SpeechTaFeatureExtractor
def __A ( self ):
_lowerCAmelCase : Any = SpeechTaFeatureExtractionTester(self )
def __A ( self , a__ ):
self.assertTrue(np.all(np.mean(a__ , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a__ , axis=0 ) - 1 ) < 1e-3 ) )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = [np.asarray(a__ ) for speech_input in speech_inputs]
# Test not batched input
_lowerCAmelCase : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : Union[str, Any] = feat_extract(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Tuple = feat_extract(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Any = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : str = [None, 1600, None]
for max_length, padding in zip(a__ , a__ ):
_lowerCAmelCase : List[Any] = feat_extract(a__ , padding=a__ , max_length=a__ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = range(800 , 1400 , 200 )
_lowerCAmelCase : int = [floats_list((1, x) )[0] for x in lengths]
_lowerCAmelCase : int = ["""longest""", """max_length""", """do_not_pad"""]
_lowerCAmelCase : Optional[int] = [None, 1600, None]
for max_length, padding in zip(a__ , a__ ):
_lowerCAmelCase : Any = feat_extract(a__ , max_length=a__ , padding=a__ )
_lowerCAmelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : int = feat_extract(
a__ , truncation=a__ , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Optional[Any] = feat_extract(
a__ , truncation=a__ , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : List[Any] = feat_extract(
a__ , truncation=a__ , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
_lowerCAmelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __A ( self ):
_lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
_lowerCAmelCase : Union[str, Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_lowerCAmelCase : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __A ( self ):
# Tests that all call wrap to encode_plus and batch_encode_plus
_lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_lowerCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_lowerCAmelCase : Tuple = [np.asarray(a__ ) for speech_input in speech_inputs]
# Test feature size
_lowerCAmelCase : List[str] = feature_extractor(audio_target=a__ , padding=a__ , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
_lowerCAmelCase : str = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
_lowerCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test batched
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : Any = feature_extractor(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
_lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_lowerCAmelCase : str = np.asarray(a__ )
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
_lowerCAmelCase : List[Any] = feature_extractor(a__ , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(a__ , a__ ):
self.assertTrue(np.allclose(a__ , a__ , atol=1e-3 ) )
def __A ( self ):
_lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0]
_lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a__ ) == len(a__ ) for x, y in zip(a__ , processed_features[input_name] ) ) )
_lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
_lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_lowerCAmelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_target(equal_length=a__ )
_lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_lowerCAmelCase : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_lowerCAmelCase : Union[str, Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __A ( self ):
_lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
_lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : str = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : Any = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Optional[int] = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" )[input_name]
_lowerCAmelCase : str = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def __A ( self ):
_lowerCAmelCase : List[str] = self.feat_extract_dict
_lowerCAmelCase : Dict = True
_lowerCAmelCase : str = self.feature_extraction_class(**a__ )
_lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : Optional[int] = [len(a__ ) for x in speech_inputs]
_lowerCAmelCase : Tuple = feat_extract.model_input_names[0]
_lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : str = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Tuple = feat_extract.pad(a__ , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , a__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.feat_extract_dict
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : Dict = self.feature_extraction_class(**a__ )
_lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
_lowerCAmelCase : int = [len(a__ ) for x in speech_inputs]
_lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
_lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
_lowerCAmelCase : List[Any] = min(a__ )
_lowerCAmelCase : Optional[int] = feat_extract.num_mel_bins # hack!
_lowerCAmelCase : Tuple = feat_extract.pad(
a__ , padding="""max_length""" , max_length=a__ , truncation=a__ , return_tensors="""np""" )
self.assertIn("""attention_mask""" , a__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __A ( self , a__ ):
from datasets import load_dataset
_lowerCAmelCase : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
_lowerCAmelCase : Union[str, Any] = ds.sort("""id""" ).select(range(a__ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def __A ( self ):
# fmt: off
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
_lowerCAmelCase : Tuple = self._load_datasamples(1 )
_lowerCAmelCase : int = SpeechTaFeatureExtractor()
_lowerCAmelCase : int = feature_extractor(a__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , a__ , atol=1e-6 ) )
def __A ( self ):
# fmt: off
_lowerCAmelCase : str = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
_lowerCAmelCase : Optional[int] = self._load_datasamples(1 )
_lowerCAmelCase : Optional[Any] = SpeechTaFeatureExtractor()
_lowerCAmelCase : Tuple = feature_extractor(audio_target=a__ , return_tensors="""pt""" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , a__ , atol=1e-4 ) )
| 663 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
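# Preferred replacement, per the deprecation warning above:
# from transformers import SegformerImageProcessor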
| 663 | 1 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class __A ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , a__ = 768 , ):
super().__init__()
_lowerCAmelCase : Optional[int] = nn.Parameter(torch.zeros(1 , a__ ) )
_lowerCAmelCase : Optional[Any] = nn.Parameter(torch.ones(1 , a__ ) )
    def __A ( self , torch_device = None , torch_dtype = None , ):
        self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
        self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
        return self
    def __A ( self , embeds ):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds
    def __A ( self , embeds ):
        embeds = (embeds * self.std) + self.mean
        return embeds
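    # The two methods above whiten embeddings with the learned mean/std and
    # invert that whitening; composing them is an identity up to floating-point
    # error.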
| 663 |
"""simple docstring"""
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> List[Any]:
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[int] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
""" https://api.github.com/repos/huggingface/transformers/actions/runners"""
)
_lowerCAmelCase : List[str] = subprocess.run(_lowerCamelCase ,shell=_lowerCamelCase ,stdout=subprocess.PIPE )
_lowerCAmelCase : int = output.stdout.decode("""utf-8""" )
_lowerCAmelCase : Tuple = json.loads(_lowerCamelCase )
_lowerCAmelCase : int = status["""runners"""]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_lowerCamelCase )
# save the result so we can report them on Slack
with open("""offline_runners.txt""" ,"""w""" ) as fp:
fp.write(json.dumps(_lowerCamelCase ) )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : int = """\n""".join([x["""name"""] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return values.split(""",""" )
_a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_a : Tuple = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 663 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
def __init__( self , a__ , a__=3 , a__=32 , a__=3 , a__=10 , a__=[10, 20, 30, 40] , a__=[1, 1, 2, 1] , a__=True , a__=True , a__="relu" , a__=3 , a__=None , ):
_lowerCAmelCase : Any = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : Union[str, Any] = embeddings_size
_lowerCAmelCase : int = hidden_sizes
_lowerCAmelCase : List[str] = depths
_lowerCAmelCase : int = is_training
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : Any = num_labels
_lowerCAmelCase : Any = scope
_lowerCAmelCase : List[Any] = len(a__ )
def __A ( self ):
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values
def __A ( self ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : str = FlaxRegNetModel(config=a__ )
_lowerCAmelCase : Dict = model(a__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Dict = self.num_labels
_lowerCAmelCase : List[Any] = FlaxRegNetForImageClassification(config=a__ )
_lowerCAmelCase : Any = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase : int = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : int = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
_UpperCamelCase : str = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Optional[Any] = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = FlaxRegNetModelTester(self )
_lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=a__ , has_text_modality=a__ )
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(a__ )
_lowerCAmelCase : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
def check_hidden_states_output(a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = model_class(a__ )
_lowerCAmelCase : int = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase : Tuple = self.model_tester.num_stages
self.assertEqual(len(a__ ) , expected_num_stages + 1 )
_lowerCAmelCase , _lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[Any] = True
check_hidden_states_output(a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : Optional[int] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Any = model_class(a__ )
@jax.jit
def model_jitted(a__ , **a__ ):
return model(pixel_values=a__ , **a__ )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Optional[Any] = model_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : List[str] = model_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
def prepare_img() -> Optional[Any]:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_flax
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Dict = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=a__ , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = model(**a__ )
# verify the logits
_lowerCAmelCase : Optional[Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Dict = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
| 663 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 1 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_a : List[str] = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args , unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 663 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000000 ) -> int:
_lowerCAmelCase : List[str] = [i - 1 for i in range(limit + 1 )]
    for i in range(2 ,limit + 1 ):
        if phi[i] == i - 1:  # phi[i] still equals i - 1, so i is prime
            for j in range(2 * i ,limit + 1 ,i ):
                phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
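    # Hand-checked sanity values (a hedged note, not part of the original):
    # phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so the sum for limit=8 is 21 --
    # the number of reduced proper fractions with denominator <= 8.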
| 663 | 1 |
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : int = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = "data2vec-audio"
def __init__( self , a__=32 , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=0.1 , a__=0.0 , a__=0.1 , a__=0.1 , a__=0.0_2 , a__=1e-5 , a__="gelu" , a__=(512, 512, 512, 512, 512, 512, 512) , a__=(5, 2, 2, 2, 2, 2, 2) , a__=(10, 3, 3, 3, 3, 2, 2) , a__=False , a__=16 , a__=19 , a__=5 , a__=0.0_5 , a__=10 , a__=2 , a__=0.0 , a__=10 , a__=0 , a__="sum" , a__=False , a__=False , a__=256 , a__=(512, 512, 512, 512, 1500) , a__=(5, 3, 3, 1, 1) , a__=(1, 2, 3, 1, 1) , a__=512 , a__=0 , a__=1 , a__=2 , a__=False , a__=3 , a__=2 , a__=3 , a__=None , **a__ , ):
super().__init__(**a__ , pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ )
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Optional[int] = feat_extract_activation
_lowerCAmelCase : Any = list(a__ )
_lowerCAmelCase : Dict = list(a__ )
_lowerCAmelCase : Union[str, Any] = list(a__ )
_lowerCAmelCase : int = conv_bias
_lowerCAmelCase : Tuple = num_conv_pos_embeddings
_lowerCAmelCase : Dict = num_conv_pos_embedding_groups
_lowerCAmelCase : Optional[int] = conv_pos_kernel_size
_lowerCAmelCase : str = len(self.conv_dim )
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : str = hidden_dropout
_lowerCAmelCase : List[str] = attention_dropout
_lowerCAmelCase : Any = activation_dropout
_lowerCAmelCase : Tuple = feat_proj_dropout
_lowerCAmelCase : Dict = final_dropout
_lowerCAmelCase : Union[str, Any] = layerdrop
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase : Tuple = mask_time_prob
_lowerCAmelCase : Any = mask_time_length
_lowerCAmelCase : Tuple = mask_time_min_masks
_lowerCAmelCase : Tuple = mask_feature_prob
_lowerCAmelCase : int = mask_feature_length
_lowerCAmelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCAmelCase : Optional[int] = ctc_loss_reduction
_lowerCAmelCase : int = ctc_zero_infinity
# adapter
_lowerCAmelCase : int = add_adapter
_lowerCAmelCase : List[Any] = adapter_kernel_size
_lowerCAmelCase : Tuple = adapter_stride
_lowerCAmelCase : Tuple = num_adapter_layers
_lowerCAmelCase : Dict = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCAmelCase : List[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCAmelCase : List[str] = list(a__ )
_lowerCAmelCase : List[Any] = list(a__ )
_lowerCAmelCase : str = list(a__ )
_lowerCAmelCase : Tuple = xvector_output_dim
@property
def __A ( self ):
return math.prod(self.conv_stride )
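        # With the default conv_stride (5, 2, 2, 2, 2, 2, 2) this evaluates to
        # 5 * 2**6 = 320, i.e. one feature frame per 320 input samples
        # (assuming the strides above; the ratio changes with the config).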
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Tuple = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : str = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
_a : int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int=False ) -> Optional[int]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
_lowerCAmelCase : List[Any] = os.path.abspath(_lowerCamelCase )
logger.info(f"Loading PyTorch weights from {pt_path}" )
_lowerCAmelCase : Dict = torch.load(_lowerCamelCase ,map_location="""cpu""" )
logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
_lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(_lowerCamelCase ,_lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_lowerCAmelCase : Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(_lowerCamelCase ,_lowerCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple[str] ,_lowerCamelCase : np.ndarray ,_lowerCamelCase : Dict[str, jnp.ndarray] ,_lowerCamelCase : str ,) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(_lowerCamelCase : Tuple[str] ) -> bool:
return len(set(_lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_lowerCAmelCase : int = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_lowerCAmelCase : int = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : str = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_lowerCAmelCase : List[Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_lowerCAmelCase : str = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_lowerCAmelCase : List[str] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_lowerCAmelCase : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_lowerCAmelCase : Optional[int] = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_lowerCAmelCase : int = pt_tuple_key[-2] + """_v"""
if name is not None:
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
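# Illustrative renames performed above (hypothetical keys, for orientation):
#   ("layer_norm", "weight")          -> ("layer_norm", "scale")
#   ("dense", "weight"), 2-D tensor   -> ("dense", "kernel"), tensor transposed
#   ("conv", "weight"), 4-D tensor    -> ("conv", "kernel"), axes -> (2, 3, 1, 0)
#   ("bn", "running_mean")            -> ("bn", "mean")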
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ) -> Dict:
# convert pytorch tensor to numpy
_lowerCAmelCase : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_lowerCAmelCase : Any = flax_model.params["""params"""]
else:
_lowerCAmelCase : Optional[Any] = flax_model.params
_lowerCAmelCase : Optional[int] = flatten_dict(_lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : Union[str, Any] = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_lowerCamelCase )
_lowerCAmelCase : Any = {}
_lowerCAmelCase : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : int = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_lowerCAmelCase : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : str = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = rename_key_and_reshape_tensor(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : Optional[Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_lowerCAmelCase : Optional[int] = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Dict = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Optional[Any] = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : int ) -> List[str]:
import torch
# Load the index
_lowerCAmelCase : List[Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
_lowerCAmelCase : Tuple = torch.load(_lowerCamelCase )
_lowerCAmelCase : int = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : Tuple = flax_model.params["""params"""]
_lowerCAmelCase : List[str] = flatten_dict(_lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_lowerCAmelCase : Union[str, Any] = flax_model.params
_lowerCAmelCase : Dict = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : Any = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : List[Any] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : str = rename_key_and_reshape_tensor(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_lowerCAmelCase : Optional[Any] = jnp.asarray(_lowerCamelCase )
continue
if "var" in flax_key[-1]:
_lowerCAmelCase : Tuple = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase ,_lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : List[Any] = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Union[str, Any] = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ) -> Optional[Any]:
_lowerCAmelCase : Tuple = os.path.abspath(_lowerCamelCase )
logger.info(f"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
_lowerCAmelCase : int = getattr(_lowerCamelCase ,"""Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCamelCase ,"""rb""" ) as state_f:
try:
_lowerCAmelCase : Dict = from_bytes(_lowerCamelCase ,state_f.read() )
except UnpicklingError:
raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(_lowerCamelCase ,_lowerCamelCase )
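# Hypothetical usage sketch (names assumed from the public transformers API):
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")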
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : str ) -> Optional[int]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    _lowerCAmelCase : List[str] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa ,_lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        _lowerCAmelCase : Dict = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,_lowerCamelCase )
_lowerCAmelCase : Tuple = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : List[Any] = pt_model.state_dict()
_lowerCAmelCase : str = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_lowerCAmelCase : int = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : str = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_lowerCAmelCase : int = flax_key_tuple[0] == pt_model.base_model_prefix
_lowerCAmelCase : Union[str, Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : int = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : str = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCamelCase ) not in pt_model_dict:
# conv layer
_lowerCAmelCase : str = flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase : Any = jnp.transpose(_lowerCamelCase ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ) not in pt_model_dict:
# linear layer
_lowerCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase : str = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCAmelCase : str = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_lowerCAmelCase : Dict = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
_lowerCAmelCase : Optional[Any] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
_lowerCAmelCase : Union[str, Any] = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_lowerCAmelCase : Tuple = """.""".join(_lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_lowerCAmelCase : List[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_lowerCAmelCase : Tuple = key.split(""".""" )
_lowerCAmelCase : List[str] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_lowerCAmelCase : Optional[int] = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
_lowerCAmelCase : List[Any] = key_components[-2] + """_v"""
if name is not None:
_lowerCAmelCase : Dict = key_components[:-3] + [name]
_lowerCAmelCase : Optional[Any] = """.""".join(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = key
if flax_key in special_pt_names:
_lowerCAmelCase : Any = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
_lowerCAmelCase : List[str] = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase ,np.ndarray ) else flax_tensor
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
_lowerCAmelCase : Dict = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(_lowerCamelCase ) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
""" use it for predictions and inference.""" )
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"""If your task is similar to the task the model of the checkpoint was trained on, """
f"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
| 663 |
"""simple docstring"""
from PIL import Image
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ,_lowerCamelCase : int ) -> Image:
_lowerCAmelCase : Any = (259 * (level + 255)) / (255 * (259 - level))
def contrast(_lowerCamelCase : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(_lowerCamelCase )
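# Worked example (rounded, a hedged illustration): for level=170 the factor is
# (259 * 425) / (255 * 89) ~= 4.85, so a pixel at 128 stays at 128 while a
# pixel at 150 maps to int(128 + 4.85 * 22) = 234 -- midtones are spread apart.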
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : Union[str, Any] = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
class OverFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class UnderFlowError ( SCREAMING_SNAKE_CASE_ ):
    pass
class __A :
def __init__( self ):
_lowerCAmelCase : Union[str, Any] = [
[],
[],
[],
]
def __A ( self , a__ , a__ ):
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError("""Maximum queue size is 100""" )
self.queues[priority].append(a__ )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def __A ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ):
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class __A :
def __init__( self ):
_lowerCAmelCase : int = []
def __A ( self , a__ ):
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(a__ )
def __A ( self ):
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
_lowerCAmelCase : int = min(self.queue )
self.queue.remove(a__ )
return data
def __str__( self ):
return str(self.queue )
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
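    # Expected dequeue order (priority 0 first, FIFO within a priority):
    # 10, 100, 128, 70, 7, then 64, 1, 5, 4; the tenth dequeue raises
    # UnderFlowError because only nine items were enqueued.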
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 663 | 1 |
"""simple docstring"""
_a : str = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
_a : Dict = {value: key for key, value in encode_dict.items()}
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
_lowerCAmelCase : Tuple = """"""
for letter in word.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception("""encode() accepts only letters of the alphabet and spaces""" )
return encoded
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> str:
if set(_lowerCamelCase ) - {"A", "B", " "} != set():
raise Exception("""decode() accepts only 'A', 'B' and spaces""" )
_lowerCAmelCase : Dict = """"""
for word in coded.split():
while len(_lowerCamelCase ) != 0:
decoded += decode_dict[word[:5]]
_lowerCAmelCase : Dict = word[5:]
decoded += " "
return decoded.strip()
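# Round-trip sketch (comment-only; the two functions above were originally
# named encode() and decode()):
#   encode("ab c")              -> "AAAAAAAAAB AAABA"
#   decode("AAAAAAAAAB AAABA")  -> "ab c"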
if __name__ == "__main__":
from doctest import testmod
testmod()
| 663 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_a : int = Lock()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ,_lowerCamelCase : List[str] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : Any ,_lowerCamelCase : int ,_lowerCamelCase : str ) -> Tuple:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 ,10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowerCamelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
_lowerCAmelCase : List[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
_lowerCAmelCase : Any = min(_lowerCamelCase ,_lowerCamelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowerCamelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
_lowerCAmelCase : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
_lowerCAmelCase : Dict = max(_lowerCamelCase ,_lowerCamelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowerCamelCase )
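# Note (assumption made explicit): the fixed range(0, 10) above gives ten
# compare-exchange phases, which fully sorts lists of at most ten elements --
# odd-even transposition needs n phases for n items, and the demo uses ten.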
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ) -> Optional[int]:
_lowerCAmelCase : int = []
_lowerCAmelCase : List[str] = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
_lowerCAmelCase : Optional[Any] = Pipe()
_lowerCAmelCase : Optional[int] = Pipe()
process_array_.append(
Process(
target=_lowerCamelCase ,args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) ,) )
_lowerCAmelCase : List[Any] = temp_rs
_lowerCAmelCase : int = temp_rr
for i in range(1 ,len(_lowerCamelCase ) - 1 ):
_lowerCAmelCase : List[Any] = Pipe()
_lowerCAmelCase : Dict = Pipe()
process_array_.append(
Process(
target=_lowerCamelCase ,args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) ,) )
_lowerCAmelCase : List[Any] = temp_rs
_lowerCAmelCase : str = temp_rr
process_array_.append(
Process(
target=_lowerCamelCase ,args=(
len(_lowerCamelCase ) - 1,
arr[len(_lowerCamelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowerCamelCase ) - 1],
) ,) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 ,len(_lowerCamelCase ) ):
_lowerCAmelCase : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : List[str] = list(range(10 ,0 ,-1 ) )
print("""Initial List""" )
print(*_lowerCamelCase )
_lowerCAmelCase : int = odd_even_transposition(_lowerCamelCase )
print("""Sorted List\n""" )
print(*_lowerCamelCase )
if __name__ == "__main__":
main()
| 663 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : Dict ,_lowerCamelCase : Dict=8 ) -> Any:
_lowerCAmelCase : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : Optional[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
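# Example (hedged): with scale_factor=8 the block size is 8**2 = 64, so
# height=768 -> (768 // 64) * 8 = 96 and height=300 -> ceil(300 / 64) * 8 = 40.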
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ,_lowerCamelCase : Any=512 ,_lowerCamelCase : Dict=512 ) -> List[Any]:
_lowerCAmelCase : Any = pil_image.resize((w, h) ,resample=Image.BICUBIC ,reducing_gap=1 )
_lowerCAmelCase : Dict = np.array(pil_image.convert("""RGB""" ) )
_lowerCAmelCase : List[str] = arr.astype(np.floataa ) / 1_27.5 - 1
_lowerCAmelCase : int = np.transpose(_lowerCamelCase ,[2, 0, 1] )
_lowerCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).unsqueeze(0 )
return image
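# Pixel pipeline above (a summary comment): resize -> RGB array -> scale from
# [0, 255] to [-1, 1] via x / 127.5 - 1 -> CHW layout -> 1xCxHxW torch tensor.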
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
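        # e.g. num_inference_steps=100, strength=0.3 keeps only the final
        # int(100 * 0.3) = 30 scheduler timesteps for the img2img denoising.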
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
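                # classifier-free guidance: move the prediction from the
                # unconditional branch toward the text-conditioned branch,
                # scaled by guidance_scale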
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
| 663 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = KandinskyVaaPriorPipeline
_UpperCamelCase : Any = ["prompt"]
_UpperCamelCase : List[Any] = ["prompt", "negative_prompt"]
_UpperCamelCase : Any = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
_UpperCamelCase : List[str] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
_lowerCAmelCase : Optional[int] = PriorTransformer(**a__ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
_lowerCAmelCase : List[str] = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_lowerCAmelCase : Tuple = CLIPVisionModelWithProjection(a__ )
return model
@property
def __A ( self ):
_lowerCAmelCase : Tuple = CLIPImageProcessor(
crop_size=224 , do_center_crop=a__ , do_normalize=a__ , do_resize=a__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
return image_processor
def __A ( self ):
_lowerCAmelCase : List[str] = self.dummy_prior
_lowerCAmelCase : Union[str, Any] = self.dummy_image_encoder
_lowerCAmelCase : List[str] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_image_processor
_lowerCAmelCase : Union[str, Any] = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=a__ , clip_sample_range=1_0.0 , )
_lowerCAmelCase : Union[str, Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : int = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Any = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Union[str, Any] = {
"""prompt""": """horse""",
"""generator""": generator,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Tuple = """cpu"""
_lowerCAmelCase : Optional[Any] = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.image_embeds
_lowerCAmelCase : Optional[int] = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Optional[Any] = image[0, -10:]
_lowerCAmelCase : int = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_lowerCAmelCase : str = np.array(
[-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __A ( self ):
_lowerCAmelCase : Optional[int] = torch_device == """cpu"""
_lowerCAmelCase : Optional[Any] = True
_lowerCAmelCase : Any = False
self._test_inference_batch_single_identical(
test_max_difference=a__ , relax_max_difference=a__ , test_mean_pixel_difference=a__ , )
@skip_mps
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Union[str, Any] = False
self._test_attention_slicing_forward_pass(
test_max_difference=a__ , test_mean_pixel_difference=a__ , )
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=32 , a__=2 , a__=3 , a__=16 , a__=[32, 64, 128] , a__=[1, 2, 1] , a__=[2, 2, 4] , a__=2 , a__=2.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=True , a__=0.0_2 , a__=1e-5 , a__=True , a__=None , a__=True , a__=10 , a__=8 , a__=["stage1", "stage2"] , a__=[1, 2] , ):
_lowerCAmelCase : str = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Optional[Any] = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : Tuple = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : Optional[int] = window_size
_lowerCAmelCase : Optional[int] = mlp_ratio
_lowerCAmelCase : Optional[Any] = qkv_bias
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : Any = attention_probs_dropout_prob
_lowerCAmelCase : int = drop_path_rate
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : List[Any] = use_absolute_embeddings
_lowerCAmelCase : Union[str, Any] = patch_norm
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = is_training
_lowerCAmelCase : Optional[int] = scope
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Tuple = encoder_stride
_lowerCAmelCase : str = out_features
_lowerCAmelCase : Tuple = out_indices
def __A ( self ):
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Dict = None
if self.use_labels:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Dict = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = FocalNetModel(config=a__ )
model.to(a__ )
model.eval()
        result = model(a__ )
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
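        # Added worked example: with the defaults above (image_size=32, patch_size=2,
        # depths=[1, 2, 1], embed_dim=16), the 16 * 16 = 256 patches are merged twice
        # (// 4 per stage) down to 16 tokens, and the width grows to 16 * 2 ** 2 = 64.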
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Optional[int] = FocalNetBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = FocalNetForMaskedImageModeling(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = model(a__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Any = FocalNetForMaskedImageModeling(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = model(a__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : str = self.type_sequence_label_size
_lowerCAmelCase : Tuple = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Any = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : str = 1
_lowerCAmelCase : int = FocalNetForImageClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[Any] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = config_and_inputs
_lowerCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Any = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = FocalNetModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=a__ , embed_dim=37 , has_text_modality=a__ )
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def __A ( self ):
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase : List[Any] = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase : str = model_class(a__ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : List[str] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Union[str, Any] = outputs.hidden_states
_lowerCAmelCase : List[str] = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# FocalNet has a different seq_length
_lowerCAmelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowerCAmelCase : Any = outputs.reshaped_hidden_states
self.assertEqual(len(a__ ) , a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = reshaped_hidden_states[0].shape
_lowerCAmelCase : List[str] = (
reshaped_hidden_states[0].view(a__ , a__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase : int = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Any = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = 3
_lowerCAmelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_lowerCAmelCase : str = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@slow
def __A ( self ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Any = FocalNetModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Any = _config_zero_init(a__ )
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(config=a__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
# TODO update organization
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : List[str] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(a__ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_lowerCAmelCase : str = image_processor(images=a__ , return_tensors="""pt""" ).to(a__ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**a__ )
# verify the logits
_lowerCAmelCase : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Any = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a__ , atol=1e-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = (FocalNetBackbone,) if is_torch_available() else ()
_UpperCamelCase : int = FocalNetConfig
_UpperCamelCase : List[str] = False
def __A ( self ):
_lowerCAmelCase : Any = FocalNetModelTester(self )
| 663 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _get_default_logging_level():
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def _get_library_name() -> str:
    return __name__.split(""".""" )[0]
def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
        _default_handler = logging.StreamHandler() # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
global _default_handler
with _lock:
if not _default_handler:
return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None
def get_log_levels_dict():
    return log_levels
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name )
def get_verbosity() -> int:
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    return set_verbosity(INFO )
def set_verbosity_warning():
    return set_verbosity(WARNING )
def set_verbosity_debug():
    return set_verbosity(DEBUG )
def set_verbosity_error():
    return set_verbosity(ERROR )
def disable_default_handler() -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def enable_default_handler() -> None:
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def add_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler )
def remove_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )
def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )
def reset_format() -> None:
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None )
def warning_advice(self , *args , **kwargs ):
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    self.warning(*args , **kwargs )
logging.Logger.warning_once = warning_once
class EmptyTqdm:
    def __init__( self , *args , **kwargs ): # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__( self ):
        return iter(self._iterator )
    def __getattr__( self , attr ):
        def empty_fn(*args , **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        return self
    def __exit__( self , type_ , value , traceback ):
        return
class _tqdm_cls:
    def __call__( self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
global _tqdm_active
return bool(_tqdm_active )
def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
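# Illustrative usage sketch (added; the names below are the public helpers restored
# above, mirroring transformers.utils.logging):
# logger = get_logger(__name__)
# set_verbosity_info()
# logger.info("visible at INFO level")
# logger.warning_once("emitted a single time thanks to the lru_cache above")
# disable_progress_bar() # replaces tqdm(...) output with the EmptyTqdm no-op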
| 663 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
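        # "\u0120" is the byte-level BPE marker for a leading space (it renders as
        # "Ġ"), so "\u0120low" above is the token for " low".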
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
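        # decode(..., truncate_before_pattern=...) cuts the text at the earliest match
        # of any regex above; here the "\n\n\n" run strips the trailing "#" comment stub.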
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
| 663 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Tuple = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = "gpt_neox_japanese"
def __init__( self , a__=32000 , a__=2560 , a__=32 , a__=32 , a__=4 , a__="gelu" , a__=1.0_0 , a__=10000 , a__=2048 , a__=0.0_2 , a__=1e-5 , a__=True , a__=31996 , a__=31999 , a__=0.1 , a__=0.0 , **a__ , ):
super().__init__(bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Optional[Any] = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_multiple_size
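        # Note (added): GPT-NeoX Japanese stores a multiplier rather than an absolute
        # feed-forward width; the intermediate size works out to
        # hidden_size * intermediate_multiple_size (2560 * 4 = 10240 with the defaults above).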
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Dict = rotary_pct
_lowerCAmelCase : Union[str, Any] = rotary_emb_base
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Tuple = attention_dropout
_lowerCAmelCase : str = hidden_dropout
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
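# Note (added): with the _LazyModule indirection above, importing this package stays
# cheap; the torch-backed classes listed in _import_structure are only imported when
# an attribute such as AltCLIPModel is first accessed.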
| 663 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Dict = tempfile.mkdtemp()
_lowerCAmelCase : Tuple = SamImageProcessor()
_lowerCAmelCase : Tuple = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : int = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ):
_lowerCAmelCase : str = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Dict = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
_lowerCAmelCase : Optional[Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : List[Any] = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
_lowerCAmelCase : List[Any] = image_processor(a__ , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processor(images=a__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : str = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Optional[int] = [torch.ones((1, 3, 5, 5) )]
_lowerCAmelCase : List[Any] = [[1764, 2646]]
_lowerCAmelCase : str = [[683, 1024]]
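        # Added context: 683 x 1024 is the longest-side-1024 rescale of the original
        # 1764 x 2646 image, so post_process_masks strips padding at that size and
        # then upsamples the 5 x 5 dummy masks back to (1, 3, 1764, 2646).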
_lowerCAmelCase : int = processor.post_process_masks(a__ , a__ , a__ )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Optional[Any] = processor.post_process_masks(
a__ , torch.tensor(a__ ) , torch.tensor(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_lowerCAmelCase : List[Any] = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase : int = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Dict = [[1, 0], [0, 1]]
with self.assertRaises(a__ ):
_lowerCAmelCase : Tuple = processor.post_process_masks(a__ , np.array(a__ ) , np.array(a__ ) )
@require_vision
@require_tf
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[str] = tempfile.mkdtemp()
_lowerCAmelCase : List[str] = SamImageProcessor()
_lowerCAmelCase : Optional[Any] = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Any = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ):
_lowerCAmelCase : List[str] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[int] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
_lowerCAmelCase : Optional[int] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.get_image_processor()
_lowerCAmelCase : str = SamProcessor(image_processor=a__ )
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : List[str] = image_processor(a__ , return_tensors="""np""" )
_lowerCAmelCase : Any = processor(images=a__ , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Dict = SamProcessor(image_processor=a__ )
_lowerCAmelCase : Dict = [tf.ones((1, 3, 5, 5) )]
_lowerCAmelCase : List[str] = [[1764, 2646]]
_lowerCAmelCase : List[str] = [[683, 1024]]
_lowerCAmelCase : Optional[Any] = processor.post_process_masks(a__ , a__ , a__ , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Dict = processor.post_process_masks(
a__ , tf.convert_to_tensor(a__ ) , tf.convert_to_tensor(a__ ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
# should also work with np
_lowerCAmelCase : List[str] = [np.ones((1, 3, 5, 5) )]
_lowerCAmelCase : List[str] = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
_lowerCAmelCase : Any = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
_lowerCAmelCase : Tuple = processor.post_process_masks(
a__ , np.array(a__ ) , np.array(a__ ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = SamImageProcessor()
_lowerCAmelCase : str = SamProcessor(a__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **a__ ):
return AutoProcessor.from_pretrained(self.tmpdirname , **a__ ).image_processor
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase : Tuple = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.get_image_processor()
_lowerCAmelCase : str = SamProcessor(image_processor=a__ )
_lowerCAmelCase : List[str] = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
_lowerCAmelCase : Union[str, Any] = [tf.convert_to_tensor(a__ )]
_lowerCAmelCase : Optional[Any] = [torch.tensor(a__ )]
_lowerCAmelCase : Tuple = [[1764, 2646]]
_lowerCAmelCase : Dict = [[683, 1024]]
_lowerCAmelCase : Optional[int] = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="""tf""" )
_lowerCAmelCase : Dict = processor.post_process_masks(
a__ , a__ , a__ , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __A ( self ):
_lowerCAmelCase : Tuple = self.get_image_processor()
_lowerCAmelCase : Any = SamProcessor(image_processor=a__ )
_lowerCAmelCase : List[str] = self.prepare_image_inputs()
_lowerCAmelCase : Any = image_processor(a__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_lowerCAmelCase : Optional[int] = processor(images=a__ , return_tensors="""pt""" )["""pixel_values"""].numpy()
_lowerCAmelCase : Optional[Any] = image_processor(a__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
_lowerCAmelCase : Optional[Any] = processor(images=a__ , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
self.assertTrue(np.allclose(a__ , a__ ) )
| 663 |
"""simple docstring"""
def perfect(number: int ) -> bool:
return sum(i for i in range(1 ,number // 2 + 1 ) if number % i == 0 ) == number
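# Worked example (added): 6 is perfect because 1 + 2 + 3 == 6, and 28 because
# 1 + 2 + 4 + 7 + 14 == 28; 12 is not, since 1 + 2 + 3 + 4 + 6 == 16.
# assert perfect(6) and perfect(28) and not perfect(12)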
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 663 | 1 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_a : Optional[Any] = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 663 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class __A :
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Builds the sample tree:  1
    #                         / \
    #                        2   3
    #                       / \
    #                      4   5
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root: Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root: Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root: Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root: Node | None ) -> int:
    return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0
def level_order(root: Node | None ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root: Node | None ,level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None ,level: int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left ,level - 1 )
            populate_output(root.right ,level - 1 )
    populate_output(root ,level )
    return output
def get_nodes_from_right_to_left(root: Node | None ,level: int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root: Node | None ,level: int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right ,level - 1 )
            populate_output(root.left ,level - 1 )
    populate_output(root ,level )
    return output
def zigzag(root: Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 ,height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root ,h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root ,h ) )
            flag = 0
    return output
def main() -> None: # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root )}" )
    print(f"Pre-order Traversal: {preorder(root )}" )
    print(f"Post-order Traversal: {postorder(root )}" ,"""\n""" )
    print(f"Height of Tree: {height(root )}" ,"""\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(root ) ,"""\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 ,height(root ) + 1 ):
        print(f"Level {level}:" ,get_nodes_from_left_to_right(root ,level=level ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 663 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ):
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Dict = 3
_lowerCAmelCase : str = (32, 32)
_lowerCAmelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(a__ )
return image
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(a__ )
@property
def __A ( self ):
def extract(*a__ , **a__ ):
class __A :
def __init__( self ):
_lowerCAmelCase : Dict = torch.ones([0] )
def __A ( self , a__ ):
self.pixel_values.to(a__ )
return self
return Out()
return extract
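    # Added note: the stand-in above mimics just enough of a CLIP feature extractor for
    # these tests - calling it yields an object whose pixel_values is an empty tensor
    # and whose .to(device) returns itself, so no real safety checker is required.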
def __A ( self ):
_lowerCAmelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Any = self.dummy_cond_unet
_lowerCAmelCase : List[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
_lowerCAmelCase : Optional[int] = self.dummy_vae
_lowerCAmelCase : Optional[Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : List[Any] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : str = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : List[str] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Dict = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Union[str, Any] = output.images
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Dict = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , )[0]
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Any = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : int = self.dummy_cond_unet
_lowerCAmelCase : List[str] = PNDMScheduler(skip_prk_steps=a__ )
_lowerCAmelCase : str = self.dummy_vae
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : str = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : List[str] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Union[str, Any] = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : Optional[Any] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = sd_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
_lowerCAmelCase : Dict = output.images
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : List[Any] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=a__ , )[0]
_lowerCAmelCase : str = image[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : Any = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=a__ )
assert isinstance(a__ , a__ )
assert isinstance(pipe.scheduler , a__ )
assert pipe.safety_checker is None
_lowerCAmelCase : List[str] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a__ )
_lowerCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(a__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowerCAmelCase : List[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_cond_unet
_lowerCAmelCase : str = PNDMScheduler(skip_prk_steps=a__ )
_lowerCAmelCase : List[Any] = self.dummy_vae
_lowerCAmelCase : Optional[Any] = self.dummy_text_encoder
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_lowerCAmelCase : Dict = unet.half()
_lowerCAmelCase : List[str] = vae.half()
_lowerCAmelCase : Optional[Any] = bert.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : List[str] = StableDiffusionPipeline(
unet=a__ , scheduler=a__ , vae=a__ , text_encoder=a__ , tokenizer=a__ , safety_checker=a__ , feature_extractor=self.dummy_extractor , )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = """A painting of a squirrel eating a burger"""
_lowerCAmelCase : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ )
_lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_lowerCAmelCase : str = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
_lowerCAmelCase : Optional[Any] = 4003660346
_lowerCAmelCase : Union[str, Any] = 7
# without safety guidance (sld_guidance_scale = 0)
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Tuple = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : int = output.images
_lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        # with safety guidance (strong configuration)
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Optional[int] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : Optional[Any] = output.images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : Any = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=a__ )
_lowerCAmelCase : List[str] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[Any] = """padme amidala taking a bath artwork, safe for work, no nudity"""
_lowerCAmelCase : int = 2734971755
_lowerCAmelCase : Optional[Any] = 7
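        # without safety guidance (sld_guidance_scale=0)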
_lowerCAmelCase : Tuple = torch.manual_seed(a__ )
_lowerCAmelCase : Dict = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : Dict = output.images
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
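        # with strong safety guidance (sld_guidance_scale=2000)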
_lowerCAmelCase : Optional[Any] = torch.manual_seed(a__ )
_lowerCAmelCase : str = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Dict = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
_lowerCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_lowerCAmelCase : Optional[int] = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[Any] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
_lowerCAmelCase : str = 1044355234
_lowerCAmelCase : int = 12
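        # without safety guidance (sld_guidance_scale=0)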
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
_lowerCAmelCase : Tuple = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
_lowerCAmelCase : Tuple = output.images
_lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
_lowerCAmelCase : Tuple = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
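        # with strong safety guidance (sld_guidance_scale=2000)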
_lowerCAmelCase : Optional[int] = torch.manual_seed(a__ )
_lowerCAmelCase : List[str] = sd_pipe(
[prompt] , generator=a__ , guidance_scale=a__ , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
_lowerCAmelCase : Optional[int] = output.images
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 663 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a : Optional[int] = logging.get_logger(__name__)
_a : Any = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = "data2vec-vision"
def __init__( self , a__=768 , a__=12 , a__=12 , a__=3072 , a__="gelu" , a__=0.0 , a__=0.0 , a__=0.0_2 , a__=1e-12 , a__=224 , a__=16 , a__=3 , a__=False , a__=False , a__=False , a__=False , a__=0.1 , a__=0.1 , a__=True , a__=[3, 5, 7, 11] , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : List[str] = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Any = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : Tuple = use_mask_token
_lowerCAmelCase : Tuple = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : Dict = use_shared_relative_position_bias
_lowerCAmelCase : Optional[Any] = layer_scale_init_value
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : Optional[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : str = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : str = use_auxiliary_head
_lowerCAmelCase : Optional[int] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Tuple = auxiliary_num_convs
_lowerCAmelCase : Dict = auxiliary_concat_input
_lowerCAmelCase : Optional[int] = semantic_loss_ignore_index
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = version.parse("1.11" )
@property
def __A ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __A ( self ):
return 1e-4
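# Minimal usage sketch (not from the original file; assumption: the classes
# above mirror transformers' Data2VecVisionConfig / Data2VecVisionOnnxConfig):
#
#   from transformers import Data2VecVisionConfig
#
#   cfg = Data2VecVisionConfig(image_size=224, patch_size=16)
#   print(cfg.hidden_size)  # 768 by default, per the __init__ defaults above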
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Tuple ) -> Dict:
_lowerCAmelCase : List[str] = int(_lowerCamelCase )
assert noofclusters < len(_lowerCamelCase )
# Find out the dimensionality
_lowerCAmelCase : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
_lowerCAmelCase : Any = list(range(len(_lowerCamelCase ) ) )
shuffle(_lowerCamelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
_lowerCAmelCase : List[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
_lowerCAmelCase : str = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
_lowerCAmelCase : List[str] = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowerCamelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float64""" ,[dim] )
_lowerCAmelCase : Optional[int] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
_lowerCAmelCase : Dict = [tf.Variable(0 ) for i in range(len(_lowerCamelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
_lowerCAmelCase : List[Any] = tf.placeholder("""int32""" )
_lowerCAmelCase : Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowerCamelCase ,_lowerCamelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
_lowerCAmelCase : Union[str, Any] = tf.placeholder("""float""" ,[None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
_lowerCAmelCase : Optional[int] = tf.reduce_mean(_lowerCamelCase ,0 )
##Node for computing Euclidean distances
# Placeholders for input
_lowerCAmelCase : Dict = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[dim] )
_lowerCAmelCase : List[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(_lowerCamelCase ,_lowerCamelCase ) ,2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
_lowerCAmelCase : Any = tf.placeholder("""float""" ,[noofclusters] )
_lowerCAmelCase : str = tf.argmin(_lowerCamelCase ,0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
_lowerCAmelCase : Optional[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowerCamelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
_lowerCAmelCase : List[str] = 100
for _ in range(_lowerCamelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : int = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
_lowerCAmelCase : Any = [
sess.run(_lowerCamelCase ,feed_dict={va: vect, va: sess.run(_lowerCamelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
_lowerCAmelCase : Any = sess.run(
_lowerCamelCase ,feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowerCamelCase ):
# Collect all the vectors assigned to this cluster
_lowerCAmelCase : List[Any] = [
vectors[i]
for i in range(len(_lowerCamelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
_lowerCAmelCase : Optional[int] = sess.run(
_lowerCamelCase ,feed_dict={mean_input: array(_lowerCamelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
# Return centroids and assignments
_lowerCAmelCase : Optional[int] = sess.run(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sess.run(_lowerCamelCase )
return centroids, assignments
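# Illustrative usage sketch (not part of the original file). The k-means
# routine above appears under an obfuscated name and signature, so this is a
# conceptual sketch rather than a drop-in call:
#
#   vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 8.5]])
#   centroids, assignments = cluster_kmeans(vectors, 2)  # k = 2 clusters
#   # centroids   -> one center per cluster (here a 2 x 2 array)
#   # assignments -> per-vector cluster index in [0, 2)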
| 663 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
| 663 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Any = GPTaTokenizer
_UpperCamelCase : Optional[int] = GPTaTokenizerFast
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : str = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Dict = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : Any = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : List[str] = """lower newer"""
_lowerCAmelCase : Union[str, Any] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : List[str] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : List[str] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : Dict = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : int = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Optional[Any] = """This is a simple input"""
_lowerCAmelCase : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Tuple = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Any = """This is a simple input"""
_lowerCAmelCase : List[Any] = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : List[str] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Union[str, Any] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[Any] = tokenizer.pad_token_id
_lowerCAmelCase : Tuple = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : List[str] = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : Tuple = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : str = """$$$"""
_lowerCAmelCase : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : List[str] = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = tokenizer.bos_token_id
_lowerCAmelCase : Optional[int] = tokenizer(a__ )
_lowerCAmelCase : Optional[Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : List[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def __A ( self ):
pass
def __A ( self ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowerCAmelCase : List[Any] = [self.get_tokenizer(do_lower_case=a__ , add_bos_token=a__ )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
_lowerCAmelCase : int = """Encode this."""
_lowerCAmelCase : Dict = """This one too please."""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
encoded_sequence += tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode_plus(
a__ , a__ , add_special_tokens=a__ , return_special_tokens_mask=a__ , )
_lowerCAmelCase : Tuple = encoded_sequence_dict["""input_ids"""]
_lowerCAmelCase : List[str] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(a__ ) , len(a__ ) )
_lowerCAmelCase : Dict = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(a__ )
]
_lowerCAmelCase : List[str] = [x for x in filtered_sequence if x is not None]
self.assertEqual(a__ , a__ )
@require_tokenizers
class __A ( unittest.TestCase ):
def __A ( self ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
_lowerCAmelCase : Tuple = """A photo of a cat"""
_lowerCAmelCase : List[str] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""test_opt""" )
_lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""./test_opt""" )
_lowerCAmelCase : List[Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
def __A ( self ):
_lowerCAmelCase : str = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=a__ )
_lowerCAmelCase : Any = """A photo of a cat"""
_lowerCAmelCase : Dict = tokenizer.encode(
a__ , )
# Same as above
self.assertEqual(a__ , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def __A ( self ):
_lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=a__ )
_lowerCAmelCase : Dict = """bos"""
_lowerCAmelCase : str = tokenizer.get_vocab()["""bos"""]
_lowerCAmelCase : Any = """A photo of a cat"""
_lowerCAmelCase : List[Any] = tokenizer.encode(
a__ , )
# We changed the bos token
self.assertEqual(a__ , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained("""./tok""" )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(
a__ , )
self.assertEqual(a__ , [31957, 250, 1345, 9, 10, 4758] )
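# Illustrative note (not from the original file): the tests above depend on
# byte-level BPE encoding a word-initial space as "\u0120" ("Ġ"), so with the
# tiny vocab/merges written in setUp:
#
#   tok = GPTaTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tok.tokenize(" lower")  # -> ["\u0120low", "er"]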
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
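# Illustrative sketch (assumption: the model API as exercised in the tests
# above): Transfo-XL threads recurrent memory between segments, so a second
# forward pass can reuse the `mems` returned by the first to extend context:
#
#   hidden, mems = model(input_ids_segment_1).to_tuple()
#   hidden, mems = model({"input_ids": input_ids_segment_2, "mems": mems}).to_tuple()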
| 663 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = IFPipeline
_UpperCamelCase : List[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
_UpperCamelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : int = PipelineTesterMixin.required_optional_params - {"latents"}
def __A ( self ):
return self._get_dummy_components()
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : str = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : str = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def __A ( self ):
        # Due to non-determinism in save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __A ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __A ( self ):
self._test_save_load_local()
def __A ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
        # IF: stage I text-to-image + stage II super-resolution
_lowerCAmelCase : Union[str, Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
_lowerCAmelCase : List[str] = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=a__ , tokenizer=a__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
_lowerCAmelCase , _lowerCAmelCase : Any = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase : Any = IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(a__ , a__ , a__ , a__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase : Any = IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase : Tuple = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(a__ , a__ , a__ , a__ )
def __A ( self , a__ , a__ , a__ , a__ ):
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Any = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : int = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def __A ( self , a__ , a__ , a__ , a__ ):
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Tuple = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : str = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : Any = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def __A ( self , a__ , a__ , a__ , a__ ):
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(a__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : str = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , num_inference_steps=2 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : str = output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(a__ , a__ )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(a__ )
_lowerCAmelCase : Dict = pipe_a(
prompt_embeds=a__ , negative_prompt_embeds=a__ , image=a__ , mask_image=a__ , original_image=a__ , generator=a__ , num_inference_steps=2 , output_type="""np""" , )
_lowerCAmelCase : int = output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(a__ , a__ )
def SCREAMING_SNAKE_CASE ( ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
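# Illustrative usage (mirrors the tests above; the helper is defined under an
# obfuscated name here but is invoked as _start_torch_memory_measurement):
#
#   _start_torch_memory_measurement()
#   _ = pipe_a(prompt_embeds=..., num_inference_steps=2, output_type="np")
#   peak_bytes = torch.cuda.max_memory_allocated()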
| 663 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def SCREAMING_SNAKE_CASE ( ) -> str:
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
_lowerCAmelCase : int = """__test_patch_submodule_mock__"""
with patch_submodule(_test_patching ,"""os.path.join""" ,_lowerCamelCase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os ,_PatchedModuleObj )
assert isinstance(_test_patching.os.path ,_PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path ,_PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
assert _test_patching.open is open
_lowerCAmelCase : Optional[int] = """__test_patch_submodule_builtin_mock__"""
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching ,"""open""" ,_lowerCamelCase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
# pandas.read_csv is not present in _test_patching
_lowerCAmelCase : int = """__test_patch_submodule_missing_mock__"""
with patch_submodule(_test_patching ,"""pandas.read_csv""" ,_lowerCamelCase ):
pass
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
    # builtins should always be mocked even if they're not in the globals
# in case they're loaded at one point
_lowerCAmelCase : Any = """__test_patch_submodule_missing_builtin_mock__"""
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching ,"""len""" ,_lowerCamelCase ) is None
with patch_submodule(_test_patching ,"""len""" ,_lowerCamelCase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def SCREAMING_SNAKE_CASE ( ) -> List[str]:
_lowerCAmelCase : List[str] = """__test_patch_submodule_start_and_stop_mock__"""
_lowerCAmelCase : Tuple = patch_submodule(_test_patching ,"""open""" ,_lowerCamelCase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def SCREAMING_SNAKE_CASE ( ) -> Any:
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
_lowerCAmelCase : Dict = """__test_patch_submodule_successive_join__"""
_lowerCAmelCase : List[Any] = """__test_patch_submodule_successive_dirname__"""
_lowerCAmelCase : List[Any] = """__test_patch_submodule_successive_rename__"""
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching ,"""os.path.join""" ,_lowerCamelCase ):
with patch_submodule(_test_patching ,"""os.rename""" ,_lowerCamelCase ):
with patch_submodule(_test_patching ,"""os.path.dirname""" ,_lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching ,"""os.rename""" ,_lowerCamelCase ):
with patch_submodule(_test_patching ,"""os.path.join""" ,_lowerCamelCase ):
with patch_submodule(_test_patching ,"""os.path.dirname""" ,_lowerCamelCase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : Optional[Any] = """__test_patch_submodule_doesnt_exist_mock__"""
with patch_submodule(_test_patching ,"""__module_that_doesn_exist__.__attribute_that_doesn_exist__""" ,_lowerCamelCase ):
pass
with patch_submodule(_test_patching ,"""os.__attribute_that_doesn_exist__""" ,_lowerCamelCase ):
pass
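# Added illustrative sketch (not from the original file): one compact
# enter/exit round-trip of the pattern the tests above exercise piecemeal.
def test_patch_submodule_roundtrip_sketch() -> None:
    from os.path import join as original_join

    mock = "__roundtrip_sketch_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        assert _test_patching.os.path.join is mock
    # the real os.path.join is restored once the context exits
    assert _test_patching.os.path.join is original_join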
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_a : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
_a : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
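# Illustrative note (not part of the original module): _LazyModule defers the
# sentencepiece-backed import until the name is first resolved on the package,
# e.g.
#
#   from transformers import GPTSw3Tokenizer  # triggers the lazy import here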
| 663 | 1 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
_a : int = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
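# The same conversion can be driven from Python. Every keyword below mirrors an
# argparse flag defined above; the file names are hypothetical placeholders.
def convert_example():
    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path="v1-5-pruned-emaonly.ckpt",  # placeholder checkpoint
        original_config_file="v1-inference.yaml",  # placeholder config
        scheduler_type="ddim",
        extract_ema=True,
    )
    pipe.save_pretrained("converted-pipeline", safe_serialization=True)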
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = DiTPipeline
_UpperCamelCase : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCamelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
_UpperCamelCase : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCamelCase : Union[str, Any] = False
def __A ( self ):
torch.manual_seed(0 )
        _lowerCAmelCase : Tuple = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=a__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=a__ , )
_lowerCAmelCase : Optional[int] = AutoencoderKL()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler()
_lowerCAmelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : Any = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Any = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : Tuple = self.get_dummy_components()
_lowerCAmelCase : Optional[int] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs(a__ )
_lowerCAmelCase : List[str] = pipe(**a__ ).images
_lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_lowerCAmelCase : List[Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
_lowerCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def __A ( self ):
self._test_inference_batch_single_identical(relax_max_difference=a__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __A ( unittest.TestCase ):
def __A ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : int = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_lowerCAmelCase : Dict = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_lowerCAmelCase : Union[str, Any] = pipe.get_label_ids(a__ )
_lowerCAmelCase : Any = pipe(a__ , generator=a__ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
F"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1e-2
def __A ( self ):
_lowerCAmelCase : str = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_lowerCAmelCase : List[str] = ["""vase""", """umbrella"""]
_lowerCAmelCase : Optional[int] = pipe.get_label_ids(a__ )
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : List[str] = pipe(a__ , generator=a__ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(a__ , a__ ):
_lowerCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1e-1
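# Condensed, hedged version of the slow tests above -- class-conditional
# sampling with the public DiT checkpoint, using only calls the tests exercise
# (from_pretrained, get_label_ids, the pipeline call). Requires a CUDA device.
def dit_example():
    generator = torch.manual_seed(0)
    pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
    pipe.to("cuda")
    class_ids = pipe.get_label_ids(["white shark"])
    return pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images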
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
    def __repr__(self):
        from pprint import pformat
        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
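# Non-interactive usage sketch for the treap above: split/merge keep the BST
# order on values and the heap order on random priorities consistent.
def treap_example() -> None:
    root = None
    for v in [5, 3, 8, 1]:
        root = insert(root, v)
    inorder(root)  # prints 1,3,5,8,
    root = erase(root, 3)
    inorder(root)  # prints 1,5,8,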
| 663 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
| 663 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
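# Programmatic equivalent of the CLI above, with hypothetical placeholder paths:
def conversion_example():
    convert_tf_checkpoint_to_pytorch(
        "mobilebert/mobilebert_variables.ckpt",  # placeholder TF checkpoint
        "mobilebert/config.json",  # placeholder config
        "mobilebert/pytorch_model.bin",  # placeholder output
    )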
| 663 |
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
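# The filtering above assumes the GitHub API shape
# {"runners": [{"name": ..., "status": ...}, ...]}; a self-contained check of
# that logic against a fabricated payload:
def filter_example():
    sample_status = {
        "runners": [
            {"name": "gpu-runner-1", "status": "online"},
            {"name": "gpu-runner-2", "status": "offline"},
        ]
    }
    offline = [
        r for r in sample_status["runners"]
        if r["name"] in ["gpu-runner-2"] and r["status"] == "offline"
    ]
    assert [r["name"] for r in offline] == ["gpu-runner-2"]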
| 663 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[Any] = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = DebertaVaTokenizer
_UpperCamelCase : Optional[int] = DebertaVaTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : List[Any] = DebertaVaTokenizer(a__ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = """this is a test"""
_lowerCAmelCase : Any = """this is a test"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(a__ ) , 30001 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def __A ( self ):
# fmt: off
_lowerCAmelCase : int = """ \tHeLLo!how \n Are yoU? """
_lowerCAmelCase : Optional[Any] = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
_lowerCAmelCase : List[Any] = DebertaVaTokenizer(a__ , do_lower_case=a__ )
_lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[int] = DebertaVaTokenizerFast(a__ , do_lower_case=a__ )
_lowerCAmelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __A ( self ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def __A ( self ):
pass
def __A ( self ):
# fmt: off
_lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : Any = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_lowerCAmelCase : Dict = DebertaVaTokenizer(a__ , split_by_punct=a__ )
_lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : List[str] = DebertaVaTokenizerFast(a__ , split_by_punct=a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def __A ( self ):
# fmt: off
_lowerCAmelCase : int = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[str] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_lowerCAmelCase : Optional[int] = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def __A ( self ):
# fmt: off
_lowerCAmelCase : int = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : Tuple = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
_lowerCAmelCase : List[Any] = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
_lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Any = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def __A ( self ):
# fmt: off
_lowerCAmelCase : Optional[Any] = """ \tHeLLo!how \n Are yoU? """
_lowerCAmelCase : Optional[int] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
_lowerCAmelCase : Any = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Dict = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
_lowerCAmelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : Any = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : str = tokenizer.encode(a__ )
_lowerCAmelCase : List[str] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : int = """This is a test"""
_lowerCAmelCase : Optional[int] = [13, 1, 4398, 25, 21, 1289]
_lowerCAmelCase : List[str] = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
_lowerCAmelCase : int = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
_lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(a__ , keep_accents=a__ )
_lowerCAmelCase : str = DebertaVaTokenizerFast(a__ , keep_accents=a__ )
_lowerCAmelCase : Tuple = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Dict = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
# fmt: off
_lowerCAmelCase : int = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : Tuple = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_lowerCAmelCase : Any = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
_lowerCAmelCase : Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
_lowerCAmelCase : List[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : List[str] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[int] = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
def __A ( self ):
_lowerCAmelCase : Any = DebertaVaTokenizer(a__ )
_lowerCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" )
_lowerCAmelCase : List[str] = tokenizer.encode("""multi-sequence build""" )
_lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(a__ )
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {"""input_ids""": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
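# Minimal spot check distilled from the tests above ("This is a test" with
# keep_accents=True). `_a` is the spiece fixture path bound at the top of this
# file; note the class is spelled DebertaV2Tokenizer in upstream transformers.
def encode_example():
    tok = DebertaVaTokenizer(_a, keep_accents=True)
    assert tok.encode("This is a test", add_special_tokens=False) == [13, 1, 4398, 25, 21, 1289]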
| 663 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 663 | 1 |
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class __A ( unittest.TestCase ):
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[Any] = pipeline(
task="""zero-shot-audio-classification""" , model="""hf-internal-testing/tiny-clap-htsat-unfused""" )
_lowerCAmelCase : List[Any] = load_dataset("""ashraq/esc50""" )
_lowerCAmelCase : Any = dataset["""train"""]["""audio"""][-1]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(a__ ) , [{"""score""": 0.5_0_1, """label""": """Sound of a dog"""}, {"""score""": 0.4_9_9, """label""": """Sound of vaccum cleaner"""}] , )
@unittest.skip("""No models are available in TF""" )
def __A ( self ):
pass
@slow
@require_torch
def __A ( self ):
_lowerCAmelCase : Optional[Any] = pipeline(
task="""zero-shot-audio-classification""" , model="""laion/clap-htsat-unfused""" , )
# This is an audio of a dog
_lowerCAmelCase : Union[str, Any] = load_dataset("""ashraq/esc50""" )
_lowerCAmelCase : List[str] = dataset["""train"""]["""audio"""][-1]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(a__ ) , [
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
] , )
_lowerCAmelCase : Any = audio_classifier([audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] )
self.assertEqual(
nested_simplify(a__ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
_lowerCAmelCase : List[str] = audio_classifier(
[audio] * 5 , candidate_labels=["""Sound of a dog""", """Sound of vaccum cleaner"""] , batch_size=5 )
self.assertEqual(
nested_simplify(a__ ) , [
[
{"""score""": 0.9_9_9, """label""": """Sound of a dog"""},
{"""score""": 0.0_0_1, """label""": """Sound of vaccum cleaner"""},
],
]
* 5 , )
@unittest.skip("""No models are available in TF""" )
def __A ( self ):
pass
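# Stand-alone sketch of the slow test above; only calls the tests themselves
# exercise are used (pipeline and load_dataset are imported at the top).
def clap_example():
    classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    audio = load_dataset("ashraq/esc50")["train"]["audio"][-1]["array"]
    # expected: the dog label scores ~0.999, per the assertions above
    return classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])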
| 663 |
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so update its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
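# Why this works: summing Euler's phi over 2..limit counts the reduced proper
# fractions n/d with n < d <= limit (Project Euler 72 states 21 such fractions
# for d <= 8). A brute-force cross-check for a tiny limit:
from math import gcd
def brute_force(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)
assert brute_force(8) == solution(8) == 21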
| 663 | 1 |
"""simple docstring"""
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # vectors are nearly colinear: fall back to linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__=None , a__=None , a__=None , ):
super().__init__()
self.register_modules(
vae=a__ , text_encoder=a__ , clip_model=a__ , tokenizer=a__ , unet=a__ , scheduler=a__ , feature_extractor=a__ , coca_model=a__ , coca_tokenizer=a__ , coca_transform=a__ , )
_lowerCAmelCase : Dict = (
feature_extractor.size
if isinstance(feature_extractor.size , a__ )
else feature_extractor.size["""shortest_edge"""]
)
_lowerCAmelCase : Optional[int] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a__ )
set_requires_grad(self.clip_model , a__ )
def __A ( self , a__ = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a__ )
def __A ( self ):
self.enable_attention_slicing(a__ )
def __A ( self ):
set_requires_grad(self.vae , a__ )
def __A ( self ):
set_requires_grad(self.vae , a__ )
def __A ( self ):
set_requires_grad(self.unet , a__ )
def __A ( self ):
set_requires_grad(self.unet , a__ )
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : List[str] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , torch.Tensor ):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(a__ )}" )
_lowerCAmelCase : Any = image.to(device=a__ , dtype=a__ )
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[str] = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : List[str] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : int = self.vae.encode(a__ ).latent_dist.sample(a__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase : List[str] = 0.1_8_2_1_5 * init_latents
_lowerCAmelCase : Optional[int] = init_latents.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Union[str, Any] = randn_tensor(init_latents.shape , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[int] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__ ):
_lowerCAmelCase : List[str] = self.coca_transform(a__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase : str = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowerCAmelCase : Optional[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : str = self.feature_extractor.preprocess(a__ )
_lowerCAmelCase : str = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
_lowerCAmelCase : Tuple = self.clip_model.get_image_features(a__ )
_lowerCAmelCase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a__ )
_lowerCAmelCase : Optional[int] = image_embeddings_clip.repeat_interleave(a__ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
_lowerCAmelCase : str = latents.detach().requires_grad_()
_lowerCAmelCase : List[str] = self.scheduler.scale_model_input(a__ , a__ )
# predict the noise residual
_lowerCAmelCase : Any = self.unet(a__ , a__ , encoder_hidden_states=a__ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowerCAmelCase : str = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase : Union[str, Any] = torch.sqrt(a__ )
_lowerCAmelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a__ ):
_lowerCAmelCase : Optional[Any] = self.scheduler.sigmas[index]
_lowerCAmelCase : Dict = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * sample
_lowerCAmelCase : int = self.vae.decode(a__ ).sample
_lowerCAmelCase : int = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : Any = transforms.Resize(self.feature_extractor_size )(a__ )
_lowerCAmelCase : str = self.normalize(a__ ).to(latents.dtype )
_lowerCAmelCase : Union[str, Any] = self.clip_model.get_image_features(a__ )
_lowerCAmelCase : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a__ )
_lowerCAmelCase : Optional[Any] = spherical_dist_loss(a__ , a__ ).mean() * clip_guidance_scale
_lowerCAmelCase : Union[str, Any] = -torch.autograd.grad(a__ , a__ )[0]
if isinstance(self.scheduler , a__ ):
_lowerCAmelCase : Any = latents.detach() + grads * (sigma**2)
_lowerCAmelCase : Dict = noise_pred_original
else:
_lowerCAmelCase : Optional[int] = noise_pred_original - torch.sqrt(a__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , a__ , a__ , a__ = None , a__ = None , a__ = 512 , a__ = 512 , a__ = 0.6 , a__ = 50 , a__ = 7.5 , a__ = 1 , a__ = 0.0 , a__ = 100 , a__ = None , a__ = "pil" , a__ = True , a__ = 0.8 , a__ = 0.1 , a__ = 0.1 , ):
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(a__ )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(a__ , torch.Generator ) and batch_size > 1:
_lowerCAmelCase : Any = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase : Tuple = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
_lowerCAmelCase : Tuple = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase : Optional[int] = """, """.join(a__ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a__ ):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
_lowerCAmelCase : Optional[int] = self.get_image_description(a__ )
if style_prompt is None:
if len(a__ ):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
_lowerCAmelCase : str = self.get_image_description(a__ )
# get prompt text embeddings for content and style
_lowerCAmelCase : Any = self.tokenizer(
a__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=a__ , return_tensors="""pt""" , )
_lowerCAmelCase : Optional[Any] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase : Tuple = self.tokenizer(
a__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=a__ , return_tensors="""pt""" , )
_lowerCAmelCase : str = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowerCAmelCase : Optional[Any] = slerp(a__ , a__ , a__ )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase : Union[str, Any] = text_embeddings.repeat_interleave(a__ , dim=0 )
# set timesteps
_lowerCAmelCase : Dict = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowerCAmelCase : Any = {}
if accepts_offset:
_lowerCAmelCase : Any = 1
self.scheduler.set_timesteps(a__ , **a__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowerCAmelCase , _lowerCAmelCase : str = self.get_timesteps(a__ , a__ , self.device )
_lowerCAmelCase : str = timesteps[:1].repeat(a__ )
# Preprocess image
_lowerCAmelCase : Optional[Any] = preprocess(a__ , a__ , a__ )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , text_embeddings.dtype , self.device , a__ )
_lowerCAmelCase : Tuple = preprocess(a__ , a__ , a__ )
_lowerCAmelCase : Any = self.prepare_latents(
a__ , a__ , a__ , text_embeddings.dtype , self.device , a__ )
_lowerCAmelCase : Union[str, Any] = slerp(a__ , a__ , a__ )
if clip_guidance_scale > 0:
_lowerCAmelCase : str = self.get_clip_image_embeddings(a__ , a__ )
_lowerCAmelCase : Optional[int] = self.get_clip_image_embeddings(a__ , a__ )
_lowerCAmelCase : Optional[Any] = slerp(
a__ , a__ , a__ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : str = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : Tuple = content_text_input.input_ids.shape[-1]
_lowerCAmelCase : Tuple = self.tokenizer([""""""] , padding="""max_length""" , max_length=a__ , return_tensors="""pt""" )
_lowerCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase : int = uncond_embeddings.repeat_interleave(a__ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : Any = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase : int = torch.randn(a__ , generator=a__ , device="""cpu""" , dtype=a__ ).to(
self.device )
else:
_lowerCAmelCase : Dict = torch.randn(a__ , generator=a__ , device=self.device , dtype=a__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
_lowerCAmelCase : List[str] = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : List[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : int = eta
# check if the scheduler accepts generator
_lowerCAmelCase : str = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowerCAmelCase : Optional[int] = generator
with self.progress_bar(total=a__ ):
for i, t in enumerate(a__ ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Optional[int] = self.scheduler.scale_model_input(a__ , a__ )
# predict the noise residual
_lowerCAmelCase : Tuple = self.unet(a__ , a__ , encoder_hidden_states=a__ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : int = noise_pred.chunk(2 )
_lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase : Union[str, Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.cond_fn(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(a__ , a__ , a__ , **a__ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase : int = 1 / 0.1_8_2_1_5 * latents
_lowerCAmelCase : Dict = self.vae.decode(a__ ).sample
_lowerCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase : str = self.numpy_to_pil(a__ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a__ , nsfw_content_detected=a__ )
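# Numeric sanity check of the slerp helper defined at the top of this file:
# spherically interpolating halfway between orthogonal unit vectors stays on
# the unit sphere, unlike plain linear interpolation.
def slerp_example():
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    mid = slerp(0.5, v0, v1)
    assert np.isclose(np.linalg.norm(mid), 1.0)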
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , ):
_lowerCAmelCase : Tuple = parent
_lowerCAmelCase : List[str] = batch_size
_lowerCAmelCase : List[Any] = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : Optional[Any] = use_labels
_lowerCAmelCase : List[str] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : int = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : Union[str, Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Any = num_patches + 1
def __A ( self ):
_lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Any = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, pixel_values
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = FlaxViTModel(config=a__ )
_lowerCAmelCase : int = model(a__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Tuple = (self.image_size, self.image_size)
_lowerCAmelCase : int = (self.patch_size, self.patch_size)
_lowerCAmelCase : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Any = self.type_sequence_label_size
_lowerCAmelCase : Any = FlaxViTForImageClassification(config=a__ )
_lowerCAmelCase : Optional[int] = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : int = FlaxViTForImageClassification(a__ )
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Any = model(a__ )
def __A ( self ):
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Tuple = config_and_inputs
_lowerCAmelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __A ( self ):
_lowerCAmelCase : Dict = FlaxViTModelTester(self )
_lowerCAmelCase : Dict = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(a__ )
_lowerCAmelCase : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase : List[str] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Optional[int] = model_class(a__ )
@jax.jit
def model_jitted(a__ , **a__ ):
return model(pixel_values=a__ , **a__ )
with self.subTest("""JIT Enabled""" ):
_lowerCAmelCase : Any = model_jitted(**a__ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_lowerCAmelCase : Any = model_jitted(**a__ ).to_tuple()
self.assertEqual(len(a__ ) , len(a__ ) )
for jitted_output, output in zip(a__ , a__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : str = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
_lowerCAmelCase : Optional[Any] = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(a__ )
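# The JIT test above compares eager and jitted outputs; the same pattern in
# miniature with a pure function:
def jit_example():
    import jax
    import jax.numpy as jnp
    @jax.jit
    def scale_and_sum(x):
        return jnp.sum(2.0 * x)
    x = jnp.ones(3)
    assert scale_and_sum(x) == 6.0
    with jax.disable_jit():
        assert scale_and_sum(x) == 6.0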
| 663 |
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
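# Worked numbers for level=170, to make the mapping concrete: the factor is
# 259*(170+255) / (255*(259-170)) = 110075/22695, roughly 4.85, so a midtone
# pixel c=150 maps to int(128 + 4.85... * 22) = 234 (PIL clips results to 0..255).
factor = (259 * (170 + 255)) / (255 * (259 - 170))
assert int(128 + factor * (150 - 128)) == 234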
| 663 | 1 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
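# Known small cases (Project Euler 47): the first pair of consecutive integers
# with two distinct prime factors each is 14 (2*7) and 15 (3*5); for three
# factors the run starts at 644 (2^2*7*23, 3*5*43, 2*17*19).
assert solution(2) == 14
assert solution(3) == 644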
| 663 |
"""simple docstring"""
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")
    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")
    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue: list[int] = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)
    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
def SCREAMING_SNAKE_CASE ( ) -> str:
_lowerCAmelCase : Union[str, Any] = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_lowerCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_lowerCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
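

# [Editor's illustration, not part of the original file] The same
# smallest-value-first behaviour as ElementPriorityQueue, sketched with the
# standard library's heapq for comparison:
if __name__ == "__main__":
    import heapq

    heap = []
    for value in (10, 70, 100, 1, 5):
        heapq.heappush(heap, value)
    assert heapq.heappop(heap) == 1
    assert heapq.heappop(heap) == 5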
| 663 | 1 |
"""simple docstring"""
def calc_profit(profit: list, weight: list, max_weight: int) -> int:
    """Greedy fractional knapsack: maximise the profit that fits in max_weight."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit (e.g. 15 kg) and till i < length
    while limit <= max_weight and i < length:
        # flag value for the greatest element encountered in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight:
            # 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take the
            # required number of remaining kgs and calculate the profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
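

# [Editor's illustration, not part of the original file] A worked example:
# with profits (1, 2, 3), weights (3, 4, 5) and a 15 kg limit, everything
# fits, so the greedy gain is simply 1 + 2 + 3 = 6.
if __name__ == "__main__":
    assert calc_profit([1, 2, 3], [3, 4, 5], 15) == 6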
| 663 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
_UpperCamelCase : str = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
_UpperCamelCase : Any = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = AudioClassificationPipeline(model=a__ , feature_extractor=a__ )
# test with a raw waveform
_lowerCAmelCase : Optional[int] = np.zeros((34000,) )
_lowerCAmelCase : Optional[Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def __A ( self , a__ , a__ ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = examples
_lowerCAmelCase : List[Any] = audio_classifier(a__ )
# by default a model is initialized with num_labels=2
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
_lowerCAmelCase : Tuple = audio_classifier(a__ , top_k=1 )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
self.run_torchaudio(a__ )
@require_torchaudio
def __A ( self , a__ ):
import datasets
# test with a local file
_lowerCAmelCase : int = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
_lowerCAmelCase : List[Any] = dataset[0]["""audio"""]["""array"""]
_lowerCAmelCase : str = audio_classifier(a__ )
self.assertEqual(
a__ , [
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
{"""score""": ANY(a__ ), """label""": ANY(a__ )},
] , )
@require_torch
def __A ( self ):
_lowerCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier"""
_lowerCAmelCase : Optional[Any] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : Any = np.ones((8000,) )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
_lowerCAmelCase : List[str] = [
{"""score""": 0.0_8_4_2, """label""": """no"""},
{"""score""": 0.0_8_3_8, """label""": """up"""},
{"""score""": 0.0_8_3_7, """label""": """go"""},
{"""score""": 0.0_8_3_4, """label""": """right"""},
]
_lowerCAmelCase : str = [
{"""score""": 0.0_8_4_5, """label""": """stop"""},
{"""score""": 0.0_8_4_4, """label""": """on"""},
{"""score""": 0.0_8_4_1, """label""": """right"""},
{"""score""": 0.0_8_3_4, """label""": """left"""},
]
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
_lowerCAmelCase : int = {"""array""": np.ones((8000,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate}
_lowerCAmelCase : int = audio_classifier(a__ , top_k=4 )
self.assertIn(nested_simplify(a__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __A ( self ):
import datasets
_lowerCAmelCase : Optional[Any] = """superb/wav2vec2-base-superb-ks"""
_lowerCAmelCase : List[str] = pipeline("""audio-classification""" , model=a__ )
_lowerCAmelCase : str = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" )
_lowerCAmelCase : Optional[Any] = np.array(dataset[3]["""speech"""] , dtype=np.float32 )
_lowerCAmelCase : List[str] = audio_classifier(a__ , top_k=4 )
self.assertEqual(
nested_simplify(a__ , decimals=3 ) , [
{"""score""": 0.9_8_1, """label""": """go"""},
{"""score""": 0.0_0_7, """label""": """up"""},
{"""score""": 0.0_0_6, """label""": """_unknown_"""},
{"""score""": 0.0_0_1, """label""": """down"""},
] , )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
def __A ( self ):
pass
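

# [Editor's illustration, not part of the original file] Minimal direct use of
# the pipeline under test, with the same tiny checkpoint as the @require_torch
# test above (downloads weights on first run):
if __name__ == "__main__":
    import numpy as np
    from transformers import pipeline

    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(classifier(np.ones((8000,)), top_k=4))  # a list of {"score": ..., "label": ...} dicts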
| 663 | 1 |
"""simple docstring"""
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
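

# [Editor's illustration, not part of the original file] A hypothetical
# invocation of this conversion script; the script file name and the dump
# path are placeholders, while BertTokenizer is one of the
# SLOW_TO_FAST_CONVERTERS keys:
#
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased \
#       --dump_path /tmp/fast_tokenizers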
| 663 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ , a__ , ):
super().__init__()
self.register_modules(
unet=a__ , scheduler=a__ , movq=a__ , )
self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __A ( self , a__ , a__ , a__ ):
# get the original timestep using init_timestep
_lowerCAmelCase : Optional[Any] = min(int(num_inference_steps * strength ) , a__ )
_lowerCAmelCase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowerCAmelCase : Dict = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__=None ):
if not isinstance(a__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(a__ )}" )
_lowerCAmelCase : Union[str, Any] = image.to(device=a__ , dtype=a__ )
_lowerCAmelCase : int = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCAmelCase : int = image
else:
if isinstance(a__ , a__ ) and len(a__ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(a__ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(a__ , a__ ):
_lowerCAmelCase : Optional[int] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a__ )
]
_lowerCAmelCase : Optional[int] = torch.cat(a__ , dim=0 )
else:
_lowerCAmelCase : List[Any] = self.movq.encode(a__ ).latent_dist.sample(a__ )
_lowerCAmelCase : Dict = self.movq.config.scaling_factor * init_latents
_lowerCAmelCase : str = torch.cat([init_latents] , dim=0 )
_lowerCAmelCase : Dict = init_latents.shape
_lowerCAmelCase : str = randn_tensor(a__ , generator=a__ , device=a__ , dtype=a__ )
# get latents
_lowerCAmelCase : Optional[Any] = self.scheduler.add_noise(a__ , a__ , a__ )
_lowerCAmelCase : int = init_latents
return latents
def __A ( self , a__=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(F"cuda:{gpu_id}" )
_lowerCAmelCase : int = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(a__ , a__ )
def __A ( self , a__=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Optional[int] = torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=a__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : List[str] = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(a__ , a__ , prev_module_hook=a__ )
# We'll offload the last model manually.
_lowerCAmelCase : Tuple = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
def __call__( self , a__ , a__ , a__ , a__ = 512 , a__ = 512 , a__ = 100 , a__ = 4.0 , a__ = 0.3 , a__ = 1 , a__ = None , a__ = "pil" , a__ = True , ):
_lowerCAmelCase : Dict = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = torch.cat(a__ , dim=0 )
_lowerCAmelCase : Dict = image_embeds.shape[0]
if isinstance(a__ , a__ ):
_lowerCAmelCase : List[Any] = torch.cat(a__ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : int = image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Any = negative_image_embeds.repeat_interleave(a__ , dim=0 )
_lowerCAmelCase : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=a__ )
if not isinstance(a__ , a__ ):
_lowerCAmelCase : Any = [image]
if not all(isinstance(a__ , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"Input is in incorrect format: {[type(a__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
_lowerCAmelCase : Tuple = torch.cat([prepare_image(a__ , a__ , a__ ) for i in image] , dim=0 )
_lowerCAmelCase : Union[str, Any] = image.to(dtype=image_embeds.dtype , device=a__ )
_lowerCAmelCase : Union[str, Any] = self.movq.encode(a__ )["""latents"""]
_lowerCAmelCase : Tuple = latents.repeat_interleave(a__ , dim=0 )
self.scheduler.set_timesteps(a__ , device=a__ )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCAmelCase , _lowerCAmelCase : Dict = downscale_height_and_width(a__ , a__ , self.movq_scale_factor )
_lowerCAmelCase : List[str] = self.prepare_latents(
a__ , a__ , a__ , a__ , image_embeds.dtype , a__ , a__ )
for i, t in enumerate(self.progress_bar(a__ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : int = {"""image_embeds""": image_embeds}
_lowerCAmelCase : List[str] = self.unet(
sample=a__ , timestep=a__ , encoder_hidden_states=a__ , added_cond_kwargs=a__ , return_dict=a__ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = variance_pred.chunk(2 )
_lowerCAmelCase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : List[str] = self.scheduler.step(
a__ , a__ , a__ , generator=a__ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(a__ , force_not_quantize=a__ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_lowerCAmelCase : List[Any] = image * 0.5 + 0.5
_lowerCAmelCase : Any = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(a__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a__ )
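

# [Editor's illustration, not part of the original file] Worked check of
# downscale_height_and_width with the default scale_factor of 8: a 768x768
# request maps to a 96x96 grid, and 767 rounds up to the same size
# (767 // 64 == 11 with a remainder, hence 12 * 8 == 96).
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768) == (96, 96)
    assert downscale_height_and_width(767, 768) == (96, 96)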
| 663 | 1 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __A :
def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Any = image_size
_lowerCAmelCase : Optional[int] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[str] = embed_dim
_lowerCAmelCase : str = depths
_lowerCAmelCase : Tuple = num_heads
_lowerCAmelCase : Dict = window_size
_lowerCAmelCase : Optional[Any] = mlp_ratio
_lowerCAmelCase : Tuple = qkv_bias
_lowerCAmelCase : Tuple = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Any = drop_path_rate
_lowerCAmelCase : List[Any] = hidden_act
_lowerCAmelCase : List[Any] = use_absolute_embeddings
_lowerCAmelCase : Tuple = patch_norm
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : str = scope
_lowerCAmelCase : List[Any] = use_labels
_lowerCAmelCase : List[str] = type_sequence_label_size
_lowerCAmelCase : List[Any] = encoder_stride
_lowerCAmelCase : Any = out_features
_lowerCAmelCase : int = out_indices
def __A ( self ):
_lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : str = MaskFormerSwinModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(a__ )
_lowerCAmelCase : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowerCAmelCase : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = MaskFormerSwinBackbone(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : int = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(a__ ):
_lowerCAmelCase : int = ["""stem"""]
_lowerCAmelCase : Union[str, Any] = MaskFormerSwinBackbone(config=a__ )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = config_and_inputs
_lowerCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_UpperCamelCase : List[Any] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[str] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = MaskFormerSwinModelTester(self )
_lowerCAmelCase : List[str] = ConfigTester(self , config_class=a__ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def __A ( self ):
pass
def __A ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __A ( self ):
return
def __A ( self ):
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : str = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : int = model_class(a__ )
_lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def __A ( self ):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def __A ( self ):
pass
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Dict = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(a__ , a__ ) )
_lowerCAmelCase : Union[str, Any] = outputs.hidden_states
_lowerCAmelCase : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a__ ) , a__ )
# Swin has a different seq_length
_lowerCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : int = True
self.check_hidden_states_output(a__ , a__ , a__ , a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Tuple = 3
_lowerCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowerCAmelCase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : Optional[Any] = True
self.check_hidden_states_output(a__ , a__ , a__ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def __A ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __A ( self ):
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(a__ ):
_lowerCAmelCase : List[str] = 0
return t
def check_equivalence(a__ , a__ , a__ , a__={} ):
with torch.no_grad():
_lowerCAmelCase : Dict = model(**a__ , return_dict=a__ , **a__ )
_lowerCAmelCase : Dict = model(**a__ , return_dict=a__ , **a__ ).to_tuple()
def recursive_check(a__ , a__ ):
if isinstance(a__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a__ , a__ ):
recursive_check(a__ , a__ )
elif isinstance(a__ , a__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(a__ , a__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(a__ ) , set_nan_tensor_to_zero(a__ ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
F" {torch.isnan(a__ ).any()} and `inf`: {torch.isinf(a__ )}. Dict has"
F" `nan`: {torch.isnan(a__ ).any()} and `inf`: {torch.isinf(a__ )}."
) , )
recursive_check(a__ , a__ )
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Any = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Optional[Any] = self._prepare_for_class(a__ , a__ )
check_equivalence(a__ , a__ , a__ )
_lowerCAmelCase : List[Any] = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Tuple = self._prepare_for_class(a__ , a__ , return_labels=a__ )
check_equivalence(a__ , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(a__ , a__ )
_lowerCAmelCase : Dict = self._prepare_for_class(a__ , a__ )
check_equivalence(a__ , a__ , a__ , {"""output_hidden_states""": True} )
_lowerCAmelCase : Union[str, Any] = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_lowerCAmelCase : Any = self._prepare_for_class(a__ , a__ , return_labels=a__ )
check_equivalence(a__ , a__ , a__ , {"""output_hidden_states""": True} )
@require_torch
class __A ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_UpperCamelCase : Optional[Any] = MaskFormerSwinConfig
def __A ( self ):
_lowerCAmelCase : Tuple = MaskFormerSwinModelTester(self )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Optional[Any] = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = backbone_class(a__ )
backbone.to(a__ )
backbone.eval()
_lowerCAmelCase : Optional[Any] = backbone(**a__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , a__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_lowerCAmelCase : str = backbone(**a__ , output_hidden_states=a__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_lowerCAmelCase : Any = backbone(**a__ , output_attentions=a__ )
self.assertIsNotNone(outputs.attentions )
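

# [Editor's illustration, not part of the original file] Minimal sketch of
# driving the backbone under test directly, with config values mirroring the
# tester defaults above (random weights, so no checkpoint download):
if __name__ == "__main__":
    import torch
    from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

    config = MaskFormerSwinConfig(
        image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1],
        num_heads=[2, 2, 4], window_size=2,
        out_features=["stage1", "stage2", "stage3"],
    )
    backbone = MaskFormerSwinBackbone(config)
    outputs = backbone(torch.rand(1, 3, 32, 32))
    print([tuple(fm.shape) for fm in outputs.feature_maps])  # channels 16 / 32 / 64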
| 663 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
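

# [Editor's illustration, not part of the original file] Running the full
# pipeline the way the slow test above does; this needs a CUDA GPU and
# downloads the openai/shap-e weights:
if __name__ == "__main__":
    import torch
    from diffusers import ShapEPipeline

    pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    images = pipe(
        "a shark", generator=generator, guidance_scale=15.0,
        num_inference_steps=64, frame_size=64, output_type="np",
    ).images
    print(images[0].shape)  # (20, 64, 64, 3)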
| 663 | 1 |
"""simple docstring"""
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class __A :
def __A ( self , a__ ):
raise NotImplementedError()
def __A ( self ):
raise NotImplementedError()
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = False , **a__ ):
_lowerCAmelCase : int = tokenizer
_lowerCAmelCase : List[Any] = skip_prompt
_lowerCAmelCase : int = decode_kwargs
# variables used in the streaming process
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = True
def __A ( self , a__ ):
if len(value.shape ) > 1 and value.shape[0] > 1:
raise ValueError("""TextStreamer only supports batch size 1""" )
elif len(value.shape ) > 1:
_lowerCAmelCase : Dict = value[0]
if self.skip_prompt and self.next_tokens_are_prompt:
_lowerCAmelCase : List[Any] = False
return
# Add the new token to the cache and decodes the entire thing.
self.token_cache.extend(value.tolist() )
_lowerCAmelCase : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
# After the symbol for a new line, we flush the cache.
if text.endswith("""\n""" ):
_lowerCAmelCase : int = text[self.print_len :]
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : List[Any] = 0
# If the last token is a CJK character, we print the characters.
elif len(a__ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
_lowerCAmelCase : List[str] = text[self.print_len :]
self.print_len += len(a__ )
# Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
# which may change with the subsequent token -- there are probably smarter ways to do this!)
else:
_lowerCAmelCase : Optional[int] = text[self.print_len : text.rfind(""" """ ) + 1]
self.print_len += len(a__ )
self.on_finalized_text(a__ )
def __A ( self ):
# Flush the cache, if it exists
if len(self.token_cache ) > 0:
_lowerCAmelCase : Optional[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
_lowerCAmelCase : Union[str, Any] = text[self.print_len :]
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Tuple = 0
else:
_lowerCAmelCase : Optional[Any] = """"""
_lowerCAmelCase : Dict = True
self.on_finalized_text(a__ , stream_end=a__ )
def __A ( self , a__ , a__ = False ):
print(a__ , flush=a__ , end="""""" if not stream_end else None )
def __A ( self , a__ ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0x4_E00 and cp <= 0x9_FFF)
or (cp >= 0x3_400 and cp <= 0x4_DBF) #
or (cp >= 0x20_000 and cp <= 0x2A_6DF) #
or (cp >= 0x2A_700 and cp <= 0x2B_73F) #
or (cp >= 0x2B_740 and cp <= 0x2B_81F) #
or (cp >= 0x2B_820 and cp <= 0x2C_EAF) #
or (cp >= 0xF_900 and cp <= 0xF_AFF)
or (cp >= 0x2F_800 and cp <= 0x2F_A1F) #
): #
return True
return False
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__ = False , a__ = None , **a__ ):
super().__init__(a__ , a__ , **a__ )
_lowerCAmelCase : List[Any] = Queue()
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Union[str, Any] = timeout
def __A ( self , a__ , a__ = False ):
self.text_queue.put(a__ , timeout=self.timeout )
if stream_end:
self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self ):
return self
def __A ( self ):
_lowerCAmelCase : List[str] = self.text_queue.get(timeout=self.timeout )
if value == self.stop_signal:
raise StopIteration()
else:
return value
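

# [Editor's illustration, not part of the original file] Typical use of the
# streamer interface sketched above: pass it to `generate`, which feeds it
# token by token and signals the end of generation. The gpt2 checkpoint is
# illustrative and is downloaded on first run.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["The quick brown fox"], return_tensors="pt")
    streamer = TextStreamer(tok, skip_prompt=True)
    model.generate(**inputs, max_new_tokens=20, streamer=streamer)  # prints tokens as they arrive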
| 663 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 | 1 |
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = "EncodecFeatureExtractor"
_UpperCamelCase : Optional[int] = ("T5Tokenizer", "T5TokenizerFast")
def __init__( self , a__ , a__ ):
super().__init__(a__ , a__ )
_lowerCAmelCase : Optional[Any] = self.feature_extractor
_lowerCAmelCase : Dict = False
def __A ( self , a__=None , a__=None , a__=True ):
return self.tokenizer.get_decoder_prompt_ids(task=a__ , language=a__ , no_timestamps=a__ )
def __call__( self , *a__ , **a__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*a__ , **a__ )
_lowerCAmelCase : Any = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""sampling_rate""" , a__ )
_lowerCAmelCase : List[Any] = kwargs.pop("""text""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : List[str] = args[0]
_lowerCAmelCase : str = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(a__ , **a__ )
if audio is not None:
_lowerCAmelCase : List[str] = self.feature_extractor(a__ , *a__ , sampling_rate=a__ , **a__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_lowerCAmelCase : Optional[int] = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
_lowerCAmelCase : Union[str, Any] = audio_inputs["""padding_mask"""]
return inputs
def __A ( self , *a__ , **a__ ):
_lowerCAmelCase : Optional[int] = kwargs.pop("""audio""" , a__ )
_lowerCAmelCase : Dict = kwargs.pop("""padding_mask""" , a__ )
if len(a__ ) > 0:
_lowerCAmelCase : Optional[int] = args[0]
_lowerCAmelCase : str = args[1:]
if audio_values is not None:
return self._decode_audio(a__ , padding_mask=a__ )
else:
return self.tokenizer.batch_decode(*a__ , **a__ )
def __A ( self , *a__ , **a__ ):
return self.tokenizer.decode(*a__ , **a__ )
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = to_numpy(a__ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = audio_values.shape
if padding_mask is None:
return list(a__ )
_lowerCAmelCase : Any = to_numpy(a__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_lowerCAmelCase : List[str] = seq_len - padding_mask.shape[-1]
_lowerCAmelCase : Optional[int] = 1 - self.feature_extractor.padding_value
_lowerCAmelCase : List[str] = np.pad(a__ , ((0, 0), (0, difference)) , """constant""" , constant_values=a__ )
_lowerCAmelCase : List[str] = audio_values.tolist()
for i in range(a__ ):
_lowerCAmelCase : Union[str, Any] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_lowerCAmelCase : Optional[int] = sliced_audio.reshape(a__ , -1 )
return audio_values
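

# [Editor's illustration, not part of the original file] Self-contained numpy
# sketch of the padding-mask trick in the audio-decoding helper above: the
# mask is padded with the *non-padding* value so generated samples survive,
# then boolean indexing strips the real padding.
if __name__ == "__main__":
    import numpy as np

    padding_value = 0
    audio = np.arange(10, dtype=np.float32).reshape(1, 1, 10)  # (bsz, channels, seq_len)
    mask = np.array([[1, 1, 1, 1, 1, 1, 0, 0]])  # two padded steps, shorter than seq_len
    diff = audio.shape[-1] - mask.shape[-1]
    mask = np.pad(mask, ((0, 0), (0, diff)), "constant", constant_values=1 - padding_value)
    trimmed = audio[0][mask[0][None, :] != padding_value].reshape(1, -1)
    assert trimmed.shape == (1, 8)  # the two padded steps are gone, new samples kept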
| 663 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = CodeGenTokenizer
_UpperCamelCase : Dict = CodeGenTokenizerFast
_UpperCamelCase : Tuple = True
_UpperCamelCase : List[Any] = {"add_prefix_space": True}
_UpperCamelCase : str = False
def __A ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : Union[str, Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
_lowerCAmelCase : Optional[int] = dict(zip(a__ , range(len(a__ ) ) ) )
_lowerCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : Any = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a__ ) )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def __A ( self , a__ ):
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Tuple = """lower newer"""
return input_text, output_text
def __A ( self ):
_lowerCAmelCase : str = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : List[str] = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(a__ , add_prefix_space=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = tokens + [tokenizer.unk_token]
_lowerCAmelCase : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Any = """lower newer"""
# Testing tokenization
_lowerCAmelCase : Any = tokenizer.tokenize(a__ , add_prefix_space=a__ )
_lowerCAmelCase : int = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ , add_prefix_space=a__ )
_lowerCAmelCase : Dict = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase : int = self.get_rust_tokenizer(add_prefix_space=a__ )
_lowerCAmelCase : Optional[int] = tokenizer.encode(a__ , add_prefix_space=a__ )
_lowerCAmelCase : Any = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
# Testing the unknown token
_lowerCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def __A ( self , *a__ , **a__ ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def __A ( self , a__=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_lowerCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
_lowerCAmelCase : Dict = """This is a simple input"""
_lowerCAmelCase : Optional[int] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : Optional[int] = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : str = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="""max_length""" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="""max_length""" , )
def __A ( self ):
_lowerCAmelCase : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
_lowerCAmelCase : Union[str, Any] = """This is a simple input"""
_lowerCAmelCase : Dict = ["""This is a simple input looooooooong""", """This is a simple input"""]
_lowerCAmelCase : Any = ("""This is a simple input""", """This is a pair""")
_lowerCAmelCase : Optional[int] = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
_lowerCAmelCase : Optional[int] = tokenizer.pad_token_id
_lowerCAmelCase : Any = tokenizer(a__ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
_lowerCAmelCase : str = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(*a__ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
_lowerCAmelCase : int = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def __A ( self ):
_lowerCAmelCase : List[str] = """$$$"""
_lowerCAmelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__ )
_lowerCAmelCase : Tuple = """This is a simple input"""
_lowerCAmelCase : Union[str, Any] = ["""This is a simple input 1""", """This is a simple input 2"""]
_lowerCAmelCase : List[str] = tokenizer.bos_token_id
_lowerCAmelCase : str = tokenizer(a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer(a__ )
self.assertEqual(out_s.input_ids[0] , a__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase : Optional[Any] = tokenizer.decode(out_s.input_ids )
_lowerCAmelCase : Optional[int] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , a__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def __A ( self ):
_lowerCAmelCase : int = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
_lowerCAmelCase : Optional[int] = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
_lowerCAmelCase : List[Any] = """\nif len_a > len_b: result = a\nelse: result = b"""
_lowerCAmelCase : Tuple = tokenizer.encode(a__ )
_lowerCAmelCase : Optional[Any] = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
_lowerCAmelCase : int = tokenizer.decode(a__ , truncate_before_pattern=a__ )
self.assertEqual(a__ , a__ )
def __A ( self ):
pass
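
# Hedged usage sketch (my addition, outside the test class): the slow test above
# exercises CodeGen's `truncate_before_pattern` decode option, which cuts the
# decoded text at the first match of any of the given regexes. It downloads a
# checkpoint, so it is kept out of the unit tests.
if __name__ == "__main__":
    _tok = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    _ids = _tok.encode("\nif len_a > len_b:\n    result = a\n\n\n\n#")
    print(_tok.decode(_ids, truncate_before_pattern=["^#", "\n\n\n"]))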
| 663 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
_a : int = logging.get_logger(__name__)
@dataclass
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self , **a__ ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    F"{deprecated_arg} is deprecated. Please use --no-{positive_arg} or"
                    F" {positive_arg}={kwargs[positive_arg]}" )
_lowerCAmelCase : Dict = kwargs.pop("""tpu_name""" , self.tpu_name )
_lowerCAmelCase : Optional[Any] = kwargs.pop("""device_idx""" , self.device_idx )
_lowerCAmelCase : int = kwargs.pop("""eager_mode""" , self.eager_mode )
_lowerCAmelCase : List[str] = kwargs.pop("""use_xla""" , self.use_xla )
super().__init__(**a__ )
_UpperCamelCase : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Name of TPU"} , )
_UpperCamelCase : int = field(
default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , )
_UpperCamelCase : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Benchmark models in eager model."} )
_UpperCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
} , )
@cached_property
def __A ( self ):
requires_backends(self , ["""tf"""] )
_lowerCAmelCase : int = None
if self.tpu:
try:
if self.tpu_name:
_lowerCAmelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCAmelCase : int = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCAmelCase : List[Any] = None
return tpu
@cached_property
def __A ( self ):
requires_backends(self , ["""tf"""] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCAmelCase : List[str] = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" )
_lowerCAmelCase : Dict = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}" )
else:
tf.config.set_visible_devices([] , """GPU""" ) # disable GPU
_lowerCAmelCase : str = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}" )
return strategy
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_tpu is not None
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return self._setup_strategy
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
return tf.config.list_physical_devices("""GPU""" )
@property
def __A ( self ):
requires_backends(self , ["""tf"""] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __A ( self ):
return self.n_gpu > 0
| 663 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
_a : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
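
# Hedged note (my addition): `_LazyModule` defers the torch-backed imports above
# until first attribute access, in the spirit of PEP 562's module-level
# __getattr__. Roughly (illustrative sketch only, not the real implementation):
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module("." + submodule, __name__), name)
#         raise AttributeError(name)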
| 663 | 1 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
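    # Hedged extra checks (my addition): both bottom-up variants above agree on
    # a matrix whose largest all-ones square has side length 2.
    example_mat = [[1, 1, 0], [1, 1, 1], [0, 1, 1]]
    assert largest_square_area_in_matrix_bottom_up(3, 3, example_mat) == 2
    assert largest_square_area_in_matrix_bottom_up_space_optimization(3, 3, example_mat) == 2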
| 663 |
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
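    # Hedged sanity checks (my addition): the classical perfect numbers 6, 28
    # and 496 should pass, and 12 should not (1 + 2 + 3 + 4 + 6 = 16 != 12).
    assert perfect(6) and perfect(28) and perfect(496)
    assert not perfect(12)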
| 663 | 1 |
"""simple docstring"""
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
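
# Hedged programmatic check (my addition, runs after the interactive session
# when executed as a script): reproduces the sample transcript above directly.
if __name__ == "__main__":
    INF = float("inf")
    sample_graph = [
        [0.0, INF, INF],
        [INF, 0.0, 2.0],
        [INF, 1.0, 0.0],
    ]
    sample_dist, _ = floyd_warshall(sample_graph, 3)
    assert sample_dist[1][2] == 2.0 and sample_dist[2][1] == 1.0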
| 663 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
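
# Hedged worked example (my addition): concrete expected traversals for the
# small tree built by make_tree(), assuming the tree shape reconstructed above.
if __name__ == "__main__":
    _root = make_tree()
    assert inorder(_root) == [4, 2, 5, 1, 3]
    assert preorder(_root) == [1, 2, 4, 5, 3]
    assert postorder(_root) == [4, 5, 2, 3, 1]
    assert height(_root) == 3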
| 663 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
_a : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
_a : List[Any] = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
_a : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
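    # Hedged smoke checks (my addition): counting is case-insensitive and skips
    # non-letters; the match score counts at most 6 common + 6 uncommon letters.
    _counts = get_letter_count("AaB!b")
    assert _counts["A"] == 2 and _counts["B"] == 2 and _counts["C"] == 0
    assert 0 <= english_freq_match_score("Hello World") <= 12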
| 663 |
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : str = torch.nn.Linear(10 , 10 )
_lowerCAmelCase : Optional[Any] = torch.optim.SGD(model.parameters() , 0.1 )
_lowerCAmelCase : Optional[Any] = Accelerator()
_lowerCAmelCase : Tuple = accelerator.prepare(a__ )
try:
pickle.loads(pickle.dumps(a__ ) )
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}" )
AcceleratorState._reset_state()
| 663 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __A :
def __init__( self , a__ , a__=13 , a__=30 , a__=2 , a__=3 , a__=True , a__=True , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=10 , a__=0.0_2 , a__=3 , a__=None , ):
_lowerCAmelCase : int = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_channels
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : List[str] = use_labels
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : List[str] = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : List[Any] = type_sequence_label_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : Union[str, Any] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : Optional[int] = (image_size // patch_size) ** 2
_lowerCAmelCase : Tuple = num_patches + 1
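        # e.g. with image_size=30 and patch_size=2: (30 // 2) ** 2 = 225 patches,
        # plus one for the prepended [CLS] token -> seq_length = 226.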
def __A ( self ):
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : List[str] = self.get_config()
return config, pixel_values, labels
def __A ( self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFViTModel(config=a__ )
_lowerCAmelCase : Optional[Any] = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
_lowerCAmelCase : str = self.image_size // 2
_lowerCAmelCase : Optional[Any] = pixel_values[:, :, :image_size, :image_size]
_lowerCAmelCase : List[str] = model(a__ , interpolate_pos_encoding=a__ , training=a__ )
_lowerCAmelCase : int = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __A ( self , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : List[str] = TFViTForImageClassification(a__ )
_lowerCAmelCase : Any = model(a__ , labels=a__ , training=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
_lowerCAmelCase : Any = self.image_size // 2
_lowerCAmelCase : int = pixel_values[:, :, :image_size, :image_size]
_lowerCAmelCase : Dict = model(a__ , interpolate_pos_encoding=a__ , training=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Any = TFViTForImageClassification(a__ )
_lowerCAmelCase : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCamelCase : List[str] = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : List[Any] = False
def __A ( self ):
_lowerCAmelCase : List[str] = TFViTModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __A ( self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __A ( self ):
pass
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , tf.keras.layers.Layer ) )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(a__ )
_lowerCAmelCase : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def __A ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(a__ )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class __A ( unittest.TestCase ):
@cached_property
def __A ( self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __A ( self ):
_lowerCAmelCase : Dict = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
_lowerCAmelCase : List[Any] = self.default_image_processor
_lowerCAmelCase : Union[str, Any] = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=a__ , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase : Tuple = model(**a__ )
# verify the logits
_lowerCAmelCase : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
_lowerCAmelCase : Optional[Any] = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , a__ , atol=1e-4 )
| 663 |
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float", [dim])
        vb = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va, vb), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
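
# Hedged modern equivalent (my addition): the same Expectation-Maximization loop
# as above, written with NumPy instead of the legacy TF1 placeholder/Session API
# (tf.sub and tf.initialize_all_variables no longer exist in TF2).
import numpy as np


def kmeans_numpy(vectors, noofclusters, noofiterations=100, seed=0):
    rng = np.random.default_rng(seed)
    pts = np.asarray(vectors, dtype=float)
    centroids = pts[rng.choice(len(pts), size=noofclusters, replace=False)]
    for _ in range(noofiterations):
        # E-step: assign every point to its nearest centroid.
        dists = np.linalg.norm(pts[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # M-step: move each centroid to the mean of its assigned points.
        for c in range(noofclusters):
            if np.any(assignments == c):
                centroids[c] = pts[assignments == c].mean(axis=0)
    return centroids, assignments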
| 663 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : str = CpmAntTokenizer
_UpperCamelCase : List[Any] = False
def __A ( self ):
super().setUp()
_lowerCAmelCase : Dict = [
"""<d>""",
"""</d>""",
"""<s>""",
"""</s>""",
"""</_>""",
"""<unk>""",
"""<pad>""",
"""</n>""",
"""我""",
"""是""",
"""C""",
"""P""",
"""M""",
"""A""",
"""n""",
"""t""",
]
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
@tooslow
def __A ( self ):
_lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
_lowerCAmelCase : Optional[Any] = """今天天气真好!"""
_lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
_lowerCAmelCase : str = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Tuple = """今天天气真好!"""
_lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
_lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
_lowerCAmelCase : Tuple = tokenizer.decode(a__ )
self.assertEqual(a__ , a__ )
| 663 |
"""simple docstring"""
_a : Optional[Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_a : List[str] = [{'type': 'code', 'content': INSTALL_CONTENT}]
_a : str = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 663 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=True , a__=None , a__=True , ):
_lowerCAmelCase : List[Any] = size if size is not None else {"""height""": 18, """width""": 18}
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Union[str, Any] = num_channels
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : Union[str, Any] = min_resolution
_lowerCAmelCase : str = max_resolution
_lowerCAmelCase : Optional[Any] = do_resize
_lowerCAmelCase : Tuple = size
_lowerCAmelCase : Optional[Any] = do_normalize
def __A ( self ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def __A ( self ):
_lowerCAmelCase : Any = ImageGPTImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """clusters""" ) )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
def __A ( self ):
_lowerCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_lowerCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
_lowerCAmelCase : List[str] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , obj[key] ) )
else:
self.assertEqual(obj[key] , a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Union[str, Any] = os.path.join(a__ , """image_processor.json""" )
image_processor_first.to_json_file(a__ )
_lowerCAmelCase : str = self.image_processing_class.from_json_file(a__ ).to_dict()
_lowerCAmelCase : Tuple = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
def __A ( self ):
_lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(a__ )
_lowerCAmelCase : str = self.image_processing_class.from_pretrained(a__ ).to_dict()
_lowerCAmelCase : str = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def __A ( self ):
pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" ,split="""test""" )
    image_a = Image.open(dataset[4]["""file"""] )
    image_b = Image.open(dataset[5]["""file"""] )
    images = [image_a, image_b]
    return images
@require_vision
@require_torch
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
_lowerCAmelCase : Dict = prepare_images()
# test non-batched
_lowerCAmelCase : Tuple = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
_lowerCAmelCase : Optional[int] = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , a__ )
# test batched
_lowerCAmelCase : List[Any] = image_processing(a__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
_lowerCAmelCase : List[Any] = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , a__ )
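

# Hedged sketch (my addition): ImageGPT's image processor quantizes each
# normalized RGB pixel to the index of its nearest "cluster" centroid, which is
# why `input_ids` above are plain integer codes instead of pixel embeddings.
# A minimal NumPy version of that nearest-centroid lookup:
def _nearest_cluster_codes(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) values scaled to [-1, 1]; clusters: (k, 3) centroids.
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)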
| 663 |
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __A :
def __init__( self , a__ , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Tuple = 13
_lowerCAmelCase : Tuple = 7
_lowerCAmelCase : Any = 30
_lowerCAmelCase : Optional[int] = self.seq_length + self.mem_len
_lowerCAmelCase : Dict = 15
_lowerCAmelCase : List[Any] = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : List[str] = 99
_lowerCAmelCase : List[Any] = [10, 50, 80]
_lowerCAmelCase : Tuple = 32
_lowerCAmelCase : int = 32
_lowerCAmelCase : Dict = 4
_lowerCAmelCase : List[str] = 8
_lowerCAmelCase : Tuple = 128
_lowerCAmelCase : Any = 2
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[Any] = 3
_lowerCAmelCase : Optional[int] = self.vocab_size - 1
_lowerCAmelCase : Dict = 0.0_1
def __A ( self ):
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def __A ( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Optional[int] = TFTransfoXLModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids_a, """mems""": mems_a}
_lowerCAmelCase , _lowerCAmelCase : List[Any] = model(a__ ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
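        # Each layer's returned memory has shape (mem_len, batch_size, hidden_size):
        # Transfo-XL caches the hidden states of the previous segment so that a
        # later forward pass can attend beyond the current input_ids.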
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = TFTransfoXLLMHeadModel(a__ )
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase : Dict = {"""input_ids""": input_ids_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : str = model(a__ ).to_tuple()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = model([input_ids_a, mems_a] ).to_tuple()
_lowerCAmelCase : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels}
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model(a__ ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def __A ( self , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Tuple = TFTransfoXLForSequenceClassification(a__ )
_lowerCAmelCase : int = model(a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self ):
_lowerCAmelCase : str = self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = config_and_inputs
_lowerCAmelCase : List[Any] = {"""input_ids""": input_ids_a}
return config, inputs_dict
@require_tf
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Dict = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : str = False
_UpperCamelCase : str = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Any = False
def __A ( self , a__ , a__ , a__ , a__ , a__ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLModelTester(self )
_lowerCAmelCase : List[Any] = ConfigTester(self , config_class=a__ , d_embed=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a__ )
def __A ( self ):
self.model_tester.set_seed()
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(a__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_lowerCAmelCase : str = model.get_output_embeddings()
assert isinstance(a__ , tf.keras.layers.Layer )
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
else:
_lowerCAmelCase : Union[str, Any] = model.get_output_embeddings()
assert x is None
_lowerCAmelCase : Optional[int] = model.get_bias()
assert name is None
def __A ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def __A ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Union[str, Any] = TFTransfoXLModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" )
def __A ( self ):
pass
@require_tf
class __A ( unittest.TestCase ):
@unittest.skip("""Skip test until #12651 is resolved.""" )
@slow
def __A ( self ):
_lowerCAmelCase : Tuple = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" )
# fmt: off
_lowerCAmelCase : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
_lowerCAmelCase : List[Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
_lowerCAmelCase : Tuple = model.generate(a__ , max_length=200 , do_sample=a__ )
self.assertListEqual(output_ids[0].numpy().tolist() , a__ )
| 663 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_a : Dict = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
_a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 663 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 663 | 1 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_a : List[str] = 'src/diffusers'
_a : int = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_a : Tuple = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_a : Dict = spec.loader.load_module()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : List[Any] ) -> Optional[Any]:
return line.startswith(_lowerCamelCase ) or len(_lowerCamelCase ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" ,_lowerCamelCase ) is not None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> List[str]:
_lowerCAmelCase : Dict = object_name.split(""".""" )
_lowerCAmelCase : Union[str, Any] = 0
# First let's find the module where our object lives.
_lowerCAmelCase : List[str] = parts[i]
while i < len(_lowerCamelCase ) and not os.path.isfile(os.path.join(_lowerCamelCase ,f"{module}.py" ) ):
i += 1
if i < len(_lowerCamelCase ):
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase ,parts[i] )
if i >= len(_lowerCamelCase ):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(_lowerCamelCase ,f"{module}.py" ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
_lowerCAmelCase : List[Any] = f.readlines()
# Now let's find the class / func in the code!
_lowerCAmelCase : List[Any] = """"""
_lowerCAmelCase : Union[str, Any] = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase ) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)" ,lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase ):
raise ValueError(f" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
_lowerCAmelCase : Tuple = line_index
while line_index < len(_lowerCamelCase ) and _should_continue(lines[line_index] ,_lowerCamelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_lowerCAmelCase : int = lines[start_index:line_index]
return "".join(_lowerCamelCase )
_a : Dict = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_a : str = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_a : List[str] = re.compile(r'<FILL\s+[^>]*>')
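# The three patterns above match, in order: a "# Copied from diffusers.x.y ..."
# marker (capturing indentation, the source object path, and any trailing
# options), an "Old->New" replacement directive inside such a marker, and
# "<FILL ...>" placeholders left in copied templates.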
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ) -> Tuple:
_lowerCAmelCase : int = code.split("""\n""" )
_lowerCAmelCase : List[str] = 0
while idx < len(_lowerCamelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(_lowerCamelCase ):
return re.search(r"""^(\s*)\S""" ,lines[idx] ).groups()[0]
return ""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
_lowerCAmelCase : str = len(get_indent(_lowerCamelCase ) ) > 0
if has_indent:
_lowerCAmelCase : str = f"class Bla:\n{code}"
_lowerCAmelCase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ,preview=_lowerCamelCase )
_lowerCAmelCase : List[str] = black.format_str(_lowerCamelCase ,mode=_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Tuple = style_docstrings_in_code(_lowerCamelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies.
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line.
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
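# Illustrative return value (not from the original file): for a file containing one stale
# copy, is_copy_consistent("src/diffusers/models/foo.py") might return
#     [["models.attention.BasicTransformerBlock", 42]]
# i.e. pairs of (copied object name, line index where the copied block starts).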
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_a : int = parser.parse_args()
check_copies(args.fix_and_overwrite)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # attribute name assumed; the original identifier was lost in transcription
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False)
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
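# Illustrative check (not part of the original doctests): interpolating y = x**2
# through (1, 1), (2, 4), (3, 9), (4, 16) at xa = 2.5 is exact for a quadratic:
#     neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0]  # -> 6.25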
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_a : Tuple = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
"""simple docstring"""
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
"""simple docstring"""
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been visited yet.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum residual capacity along the chosen path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
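# For the sample capacity matrix below (the classic CLRS flow-network example),
# the maximum flow from source 0 to sink 5 is 23.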
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
"""simple docstring"""
def solution():
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
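# Project Euler problem 19: counting the Sundays that fell on the first of the month
# during the twentieth century; the expected result of solution() is 171.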
if __name__ == "__main__":
print(solution())
"""simple docstring"""
def solution(limit: int = 1000000) -> int:
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
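# Illustrative check (this is Project Euler problem 72): solution(8) == 21, the number
# of reduced proper fractions with denominator d <= 8.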
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_reduce_labels=False):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (1, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (2, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (2, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"]),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)
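# Illustrative checks (not exhaustive): with the default 18 Taylor terms and
# 10-digit rounding, sin(90.0) -> 1.0, sin(30.0) -> 0.5, and sin(0.0) -> 0.0.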
if __name__ == "__main__":
__import__('doctest').testmod()
"""simple docstring"""
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Fundamental point operation applied to every channel value.
        return int(128 + factor * (c - 128))

    return img.point(contrast)
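# Illustrative arithmetic: level = 170 gives factor = (259 * 425) / (255 * 89) ≈ 4.85,
# so mid-gray (128) is unchanged while values away from 128 are pushed hard toward 0 or 255.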
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
_a : str = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
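# Illustrative usage (not part of the original doctests):
#     queue = CircularQueueLinkedList(initial_capacity=3)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     queue.dequeue()  # -> "a"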
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 ,10 )
fpq.enqueue(1 ,70 )
fpq.enqueue(0 ,100 )
fpq.enqueue(2 ,1 )
fpq.enqueue(2 ,5 )
fpq.enqueue(1 ,7 )
fpq.enqueue(2 ,4 )
fpq.enqueue(1 ,64 )
fpq.enqueue(0 ,128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
MAPPING_SPEECH_ENCODER_PRENET = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
MAPPING_TEXT_ENCODER_PRENET = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
MAPPING_SPEECH_DECODER_PRENET = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
MAPPING_SPEECH_DECODER_POSTNET = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
MAPPING_TEXT_DECODER_PRENET = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
MAPPING_TEXT_DECODER_POSTNET = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
MAPPING_ENCODER = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
MAPPING_DECODER = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
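# Illustrative behavior: with ignore_keys = ["encoder.proj", "text_decoder_prenet.*"],
# should_ignore("text_decoder_prenet.embed_tokens.weight", ignore_keys) is True
# (prefix wildcard match), while should_ignore("encoder.layers.0.fc1.weight", ignore_keys) is False.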
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
"""simple docstring"""
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __A ( unittest.TestCase ):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audioa = np.zeros((14000,))
        return audio_classifier, [audioa, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audioa, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)
@require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
@require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )
@require_tf
@unittest.skip("""Audio classification is not implemented for TF""" )
    def test_small_model_tf(self):
        pass
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('\'USER_TOKEN\' field cannot be empty.')
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
_a : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
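# Illustrative: with the default scale_factor=8, (768, 768) maps to (96, 96) — the pixel
# size divided by the VQ downsampling factor of 8, rounding partial 64-pixel blocks up.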

def prepare_image(pil_image, w=512, h=512):
    # Resize, force RGB, rescale pixel values from [0, 255] to [-1, 1], and
    # reorder into a batched channels-first tensor of shape (1, C, H, W).
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
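
# Sketch of the expected round trip (the file name is hypothetical):
#
#     >>> from PIL import Image
#     >>> tensor = prepare_image(Image.open("frog.png"), w=768, h=768)
#     >>> tensor.shape
#     torch.Size([1, 3, 768, 768])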

class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
    """
    Image-to-image pipeline for the Kandinsky 2.2 decoder stage.

    Args:
        unet ([`UNet2DConditionModel`]): Conditional U-Net used to denoise the image latents.
        scheduler ([`DDPMScheduler`]): Scheduler used together with `unet` to generate latents.
        movq ([`VQModel`]): MoVQ encoder/decoder that maps between images and latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
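
    # For example (illustrative numbers only): with num_inference_steps=100 and
    # strength=0.3, init_timestep is 30 and t_start is 70, so only the final 30
    # scheduler steps are run; a higher strength re-noises more of the source image.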
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            # The input already has 4 channels, i.e. it is a latent tensor; use it directly.
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
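
    # `scheduler.add_noise` above blends the encoded image with Gaussian noise at
    # the given timestep; for the standard DDPM alpha-bar schedule this is, as a
    # sketch:
    #     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise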
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
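
    # Typical use of the offloading hooks (a sketch; assumes a CUDA device and
    # accelerate >= 0.17 are installed):
    #
    #     pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    #         "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
    #     )
    #     pipe.enable_model_cpu_offload()  # each sub-model visits the GPU only while it runs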
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0]
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        # Encode the source image into MoVQ latent space and replicate it per prompt.
        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
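
# Note on the guidance step inside `__call__`: classifier-free guidance combines
# the unconditioned and image-conditioned predictions as
#     noise = noise_uncond + guidance_scale * (noise_cond - noise_uncond)
# With guidance_scale <= 1.0 the batch is never doubled and the raw prediction is
# used as-is.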